import numpy as np
import pandas as pd
from numba import njit
import pytest
import os
from collections import namedtuple
from itertools import product, combinations
from vectorbt import settings
from vectorbt.utils import checks, config, decorators, math, array, random, enum, data, params
from tests.utils import hash
seed = 42
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
with pytest.raises(Exception) as e_info:
conf.update(d=2)
conf.update(d=2, force_update=True)
assert conf['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, read_only=True)
with pytest.raises(Exception) as e_info:
conf['a'] = 2
with pytest.raises(Exception) as e_info:
del conf['a']
with pytest.raises(Exception) as e_info:
conf.pop('a')
with pytest.raises(Exception) as e_info:
conf.popitem()
with pytest.raises(Exception) as e_info:
conf.clear()
with pytest.raises(Exception) as e_info:
conf.update(a=2)
assert isinstance(conf.merge_with(dict(b=dict(d=2))), config.Config)
assert conf.merge_with(dict(b=dict(d=2)), read_only=True).read_only
assert conf.merge_with(dict(b=dict(d=2)))['b']['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': [1, 2]}})
conf['a'] = 1
conf['b']['c'].append(3)
conf['b']['d'] = 2
assert conf == {'a': 1, 'b': {'c': [1, 2, 3], 'd': 2}}
conf.reset()
assert conf == {'a': 0, 'b': {'c': [1, 2]}}
def test_merge_dicts(self):
assert config.merge_dicts({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_dicts({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
def test_configured(self):
class H(config.Configured):
def __init__(self, a, b=2, **kwargs):
super().__init__(a=a, b=b, **kwargs)
assert H(1).config == {'a': 1, 'b': 2}
assert H(1).copy(b=3).config == {'a': 1, 'b': 3}
assert H(1).copy(c=4).config == {'a': 1, 'b': 2, 'c': 4}
assert H(pd.Series([1, 2, 3])) == H(pd.Series([1, 2, 3]))
assert H(pd.Series([1, 2, 3])) != H(pd.Series([1, 2, 4]))
assert H(pd.DataFrame([1, 2, 3])) == H(pd.DataFrame([1, 2, 3]))
assert H(pd.DataFrame([1, 2, 3])) != H(pd.DataFrame([1, 2, 4]))
assert H(pd.Index([1, 2, 3])) == H(pd.Index([1, 2, 3]))
assert H(pd.Index([1, 2, 3])) != H(pd.Index([1, 2, 4]))
assert H(np.array([1, 2, 3])) == H(np.array([1, 2, 3]))
assert H(np.array([1, 2, 3])) != H(np.array([1, 2, 4]))
assert H(None) == H(None)
assert H(None) != H(10.)
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_cached_property(self):
np.random.seed(seed)
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_property(hello="world", hello2="world2")
def cache_me(self): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# clear_cache method
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
G.cache_me.clear_cache(g)
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# test blacklist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# test whitelist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
def test_cached_method(self):
np.random.seed(seed)
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_method(hello="world", hello2="world2")
def cache_me(self, b=10): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# clear_cache method
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
G.cache_me.clear_cache(g)
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# test blacklist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g.cache_me)
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# test whitelist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g.cache_me)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# disabled by non-hashable args
G.cache_me.clear_cache(g)
cached_number = g.cache_me(b=np.zeros(1))
assert g.cache_me(b=np.zeros(1)) != cached_number
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0]),
strict=False
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.Index([0])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.MultiIndex.from_arrays([[0], [1]])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])
)
def test_is_default_index(self):
assert checks.is_default_index(pd.DataFrame([[1, 2, 3]]).columns)
assert checks.is_default_index(pd.Series([1, 2, 3]).to_frame().columns)
assert checks.is_default_index(pd.Index([0, 1, 2]))
assert not checks.is_default_index(pd.Index([0, 1, 2], name='name'))
def test_is_equal(self):
assert checks.is_equal(np.arange(3), np.arange(3), np.array_equal)
assert not checks.is_equal(np.arange(3), None, np.array_equal)
assert not checks.is_equal(None, np.arange(3), np.array_equal)
assert checks.is_equal(None, None, np.array_equal)
def test_is_namedtuple(self):
assert checks.is_namedtuple(namedtuple('Hello', ['world'])(*range(1)))
assert not checks.is_namedtuple((0,))
def test_method_accepts_argument(self):
def test(a, *args, b=2, **kwargs):
pass
assert checks.method_accepts_argument(test, 'a')
assert not checks.method_accepts_argument(test, 'args')
assert checks.method_accepts_argument(test, '*args')
assert checks.method_accepts_argument(test, 'b')
assert not checks.method_accepts_argument(test, 'kwargs')
assert checks.method_accepts_argument(test, '**kwargs')
assert not checks.method_accepts_argument(test, 'c')
def test_assert_in(self):
checks.assert_in(0, (0, 1))
with pytest.raises(Exception) as e_info:
checks.assert_in(2, (0, 1))
def test_assert_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
checks.assert_numba_func(test_func_nb)
with pytest.raises(Exception) as e_info:
checks.assert_numba_func(test_func)
def test_assert_not_none(self):
checks.assert_not_none(0)
with pytest.raises(Exception) as e_info:
checks.assert_not_none(None)
def test_assert_type(self):
checks.assert_type(0, int)
checks.assert_type(np.zeros(1), (np.ndarray, pd.Series))
checks.assert_type(pd.Series([1, 2, 3]), (np.ndarray, pd.Series))
with pytest.raises(Exception) as e_info:
checks.assert_type(pd.DataFrame([1, 2, 3]), (np.ndarray, pd.Series))
def test_assert_subclass(self):
class A:
pass
class B(A):
pass
class C(B):
pass
checks.assert_subclass(B, A)
checks.assert_subclass(C, B)
checks.assert_subclass(C, A)
with pytest.raises(Exception) as e_info:
checks.assert_subclass(A, B)
def test_assert_type_equal(self):
checks.assert_type_equal(0, 1)
checks.assert_type_equal(np.zeros(1), np.empty(1))
with pytest.raises(Exception) as e_info:
checks.assert_type(0, np.zeros(1))
def test_assert_dtype(self):
checks.assert_dtype(np.zeros(1), float)
checks.assert_dtype(pd.Series([1, 2, 3]), int)
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), int)
with pytest.raises(Exception) as e_info:
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), int)
def test_assert_subdtype(self):
checks.assert_subdtype([0], np.number)
checks.assert_subdtype(np.array([1, 2, 3]), np.number)
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.number)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(np.array([1, 2, 3]), np.floating)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.floating)
def test_assert_dtype_equal(self):
checks.assert_dtype_equal([1], [1, 1, 1])
checks.assert_dtype_equal(pd.Series([1, 2, 3]), pd.DataFrame([[1, 2, 3]]))
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3.]]), pd.DataFrame([[1, 2, 3.]]))
with pytest.raises(Exception) as e_info:
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3.]]))
def test_assert_ndim(self):
checks.assert_ndim(0, 0)
checks.assert_ndim(np.zeros(1), 1)
checks.assert_ndim(pd.Series([1, 2, 3]), (1, 2))
checks.assert_ndim(pd.DataFrame([1, 2, 3]), (1, 2))
with pytest.raises(Exception) as e_info:
checks.assert_ndim(np.zeros((3, 3, 3)), (1, 2))
def test_assert_len_equal(self):
checks.assert_len_equal([[1]], [[2]])
checks.assert_len_equal([[1]], [[2, 3]])
with pytest.raises(Exception) as e_info:
checks.assert_len_equal([[1]], [[2], [3]])
def test_assert_shape_equal(self):
checks.assert_shape_equal(0, 1)
checks.assert_shape_equal([1, 2, 3], np.asarray([1, 2, 3]))
checks.assert_shape_equal([1, 2, 3], pd.Series([1, 2, 3]))
checks.assert_shape_equal(np.zeros((3, 3)), pd.Series([1, 2, 3]), axis=0)
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(1, 0))
with pytest.raises(Exception) as e_info:
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(0, 1))
def test_assert_index_equal(self):
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))
with pytest.raises(Exception) as e_info:
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([2, 3, 4]))
def test_assert_meta_equal(self):
index = ['x', 'y', 'z']
columns = ['a', 'b', 'c']
checks.assert_meta_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))
checks.assert_meta_equal(pd.Series([1, 2, 3], index=index), pd.Series([1, 2, 3], index=index))
checks.assert_meta_equal(pd.DataFrame([[1, 2, 3]], columns=columns), pd.DataFrame([[1, 2, 3]], columns=columns))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.Series([1, 2])
'''
Created on Mar. 9, 2021
@author: cefect
'''
import configparser, os, inspect, logging, copy, itertools, datetime
import pandas as pd
idx = pd.IndexSlice
import numpy as np
from scipy import interpolate, integrate
from hlpr.exceptions import QError as Error
from hlpr.plot import Plotr
from model.modcom import Model
class RiskModel(Plotr, Model): #common methods for risk1 and risk2
exp_ttl_colns = ('note', 'plot', 'aep')
ead_tot=''
def __init__(self,**kwargs):
self.dtag_d={**self.dtag_d,
**{
'exlikes':{'index_col':0},
}}
super().__init__(**kwargs)
#===========================================================================
# LOADERS------------
#===========================================================================
def set_exlikes(self,#loading exposure probability data (e.g. failure raster poly samples)
dtag = 'exlikes',
**kwargs
):
"""
load, fill nulls, add 1.0 for missing, set in data_d
called by:
risk1
risk2
"""
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('set_exlikes')
assert 'evals' in self.data_d, 'evals data set required with conditional exposure exlikes'
aep_ser = self.data_d['evals'].astype(float)
#======================================================================
# load the data-------
#======================================================================
edf = self._get_expos(dtag, log, **kwargs)
#======================================================================
# fill nulls-----
#======================================================================
"""
better not to pass any nulls.. but if so.. should treat them as ZERO!!
Null = no failure polygon = no failure
also best not to apply precision to these values
2020-01-12: moved null filling to lisamp.py
keeping it here as well for backwards compatibility
"""
booldf = edf.isna()
if booldf.any().any():
log.warning('got %i (of %i) nulls!... filling with zeros'%(booldf.sum().sum(), booldf.size))
edf = edf.fillna(0.0)
#==================================================================
# check/add event probability totals----
#==================================================================
#=======================================================================
# assemble complex aeps
#=======================================================================
#collect event names
cplx_evn_d, cnt = self._get_cplx_evn(aep_ser)
assert cnt>0, 'passed \'exlikes\' but there are no complex events'
def get_cplx_df(df, aep=None, exp_l=None): #retrieve complex data helper
if exp_l is None:
exp_l = cplx_evn_d[aep]
return df.loc[:, df.columns.isin(exp_l)]
#=======================================================================
# check we dont already exceed 1
#=======================================================================
valid = True
for aep, exp_l in cplx_evn_d.items():
cplx_df = get_cplx_df(edf, exp_l=exp_l) #get data for this event
boolidx = cplx_df.sum(axis=1).round(self.prec)>1.0 #find those exceeding 1.0
if boolidx.any():
valid = False
rpt_df = cplx_df[boolidx].join(
cplx_df[boolidx].sum(axis=1).rename('sum'))
log.debug('aep%.4f: \n\n%s'%(aep, rpt_df))
log.error('aep%.4f w/ %i exEvents failed %i (of %i) Psum<1 checks (Pmax=%.2f).. see logger \n %s'%(
aep, len(exp_l), boolidx.sum(), len(boolidx),cplx_df.sum(axis=1).max(), exp_l))
assert valid, 'some complex event probabilities exceed 1'
#=======================================================================
# #identify those events that need filling
#=======================================================================
fill_exn_d = dict()
for aep, exn_l in cplx_evn_d.items():
miss_l = set(exn_l).difference(edf.columns)
if not len(miss_l)<2:
raise Error('can only fill one exposure column per complex event: %s'%miss_l)
if len(miss_l)==1:
fill_exn_d[aep] = list(miss_l)[0]
elif len(miss_l)==0:
pass #not filling any events
else: raise Error('only allowed 1 empty')
log.debug('calculating probability for %i complex events with remaining secondaries'%(
len(fill_exn_d)))
self.noFailExn_d =copy.copy(fill_exn_d) #set this for the probability calcs
#=======================================================================
# fill in missing
#=======================================================================
res_d = dict()
for aep, exn_miss in fill_exn_d.items():
"""typically this is a single column generated for the failure raster
but this should work for multiple failure rasters"""
#===================================================================
# legacy method
#===================================================================
if self.event_rels == 'max':
edf[exn_miss]=1
#===================================================================
# rational (sum to 1)
#===================================================================
else:
#data provided on this event
cplx_df = get_cplx_df(edf, aep=aep)
assert len(cplx_df.columns)==(len(cplx_evn_d[aep])-1), 'bad column count'
assert (cplx_df.sum(axis=1)<=1).all() #check we don't already exceed 1 (redundant)
#set remainder values
edf[exn_miss] = 1- cplx_df.sum(axis=1)
log.debug('for aep %.4f \'%s\' set %i remainder values (mean=%.4f)'%(
aep, exn_miss, len(cplx_df), edf[exn_miss].mean()))
res_d[exn_miss] = round(edf[exn_miss].mean(), self.prec)
if len(res_d)>0: log.info(
'set %i complex event conditional probabilities using remainders \n %s'%(
len(res_d), res_d))
"""NO! probabilities must sum to 1
missing column = no secondary likelihoods at all for this event.
all probabilities = 1
#identify those missing in the edf (compared with the expos)
miss_l = set(self.expcols).difference(edf.columns)
#add 1.0 for any missing
if len(miss_l) > 0:
log.info('\'exlikes\' missing %i events... setting to 1.0\n %s'%(
len(miss_l), miss_l))
for coln in miss_l:
edf[coln] = 1.0"""
log.debug('prepared edf w/ %s'%str(edf.shape))
#=======================================================================
# #check conditional probabilities sum to 1----
#=======================================================================
valid = True
for aep, exp_l in cplx_evn_d.items():
cplx_df = get_cplx_df(edf, exp_l=exp_l) #get data for this event
boolidx = cplx_df.sum(axis=1)!=1.0 #find those not summing to 1.0
if boolidx.any():
"""allowing this to mass when event_rels=max"""
valid = False
log.warning('aep%.4f failed %i (of %i) Psum<=1 checks (Pmax=%.2f)'%(
aep, boolidx.sum(), len(boolidx), cplx_df.sum(axis=1).max()))
if not self.event_rels == 'max':
assert valid, 'some complex event probabilities exceed 1'
#==================================================================
# wrap
#==================================================================
# update event type frame
"""this is a late add.. would have been nice to use this more in multi_ev
see load_evals()
"""
self.eventType_df['noFail'] = self.eventType_df['rEventName'].isin(fill_exn_d.values())
self.data_d[dtag] = edf
self.cplx_evn_d = cplx_evn_d
return
def _get_cplx_evn(self, aep_ser): #get complex event sets from aep_ser
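#minimal sketch (event names assumed): for aep_ser like
#   e1_fail->0.01, e1_noFail->0.01, e2->0.10
#this returns ({0.01: ['e1_fail', 'e1_noFail'], 0.10: ['e2']}, 2),
#where the count is the size of the largest complex event set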
cplx_evn_d = dict()
cnt=0
for aep in aep_ser.unique(): #those aeps w/ duplicates:
cplx_evn_d[aep] = aep_ser[aep_ser==aep].index.tolist()
cnt=max(cnt, len(cplx_evn_d[aep])) #get the size of the largest complex event
return cplx_evn_d, cnt
def set_ttl(self, # prep the raw results for plotting
tlRaw_df = None,
dtag='r_ttl',
logger=None,
):
"""
when ttl is output, we add the EAD data, drop ARI, and add plotting handles
which is not great for data manipulation
here we clean it up and only take those for plotting
see also Artr.get_ttl()
Model._fmt_resTtl()
riskPlot.load_ttl()
"""
if logger is None: logger=self.logger
log = logger.getChild('prep_ttl')
if tlRaw_df is None: tlRaw_df = self.raw_d[dtag]
#=======================================================================
# precheck
#=======================================================================
assert isinstance(tlRaw_df, pd.DataFrame)
#check the column expectations
miss_l = set(self.exp_ttl_colns).difference(tlRaw_df.columns)
assert len(miss_l)==0, 'missing some columns: %s'%miss_l
assert 'ead' in tlRaw_df.iloc[:,0].values, 'dmg_ser missing ead entry'
#=======================================================================
# column labling
#=======================================================================
"""letting the user pass whatever label for the impacts
then reverting"""
df1 = tlRaw_df.copy()
"""
TODO: harmonize this with 'impact_units' loaded from control file
"""
self.impact_name = list(df1.columns)[1] #get the label for the impacts
newColNames = list(df1.columns)
newColNames[1] = 'impacts'
df1.columns = newColNames
#=======================================================================
# #get ead
#=======================================================================
bx = df1['aep'] == 'ead' #locate the ead row
assert bx.sum()==1
self.ead_tot = df1.loc[bx, 'impacts'].values[0]
assert not pd.isna(self.ead_tot)
assert isinstance(self.ead_tot, float), '%s got bad type on ead_tot: %s'%(self.name, type(self.ead_tot))
#=======================================================================
# #get plot values
#=======================================================================
df2 = df1.loc[df1['plot'], :].copy() #drop those not flagged for plotting
#typeset aeps
df2.loc[:, 'aep'] = df2['aep'].astype(np.float64).round(self.prec)
#=======================================================================
# #invert aep (w/ zero handling)
#=======================================================================
self._get_ttl_ari(df2)
#=======================================================================
# re-order
#=======================================================================
log.debug('finished w/ %s'%str(df2.shape))
ttl_df = df2.loc[:, sorted(df2.columns)].sort_values('ari', ascending=True)
#shortcut for datachecks
df1 = ttl_df.loc[:, ('aep', 'note')]
df1['extrap']= df1['note']=='extrap'
self.aep_df = df1.drop('note', axis=1) #for checking
self.data_d[dtag] = ttl_df.copy()
return ttl_df
#===========================================================================
# CALCULATORS-------
#===========================================================================
def ev_multis(self, #calculate (discrete) expected value from events w/ multiple exposure sets
ddf, #damages per exposure set (
edf, #secondary liklihoods per exposure set ('exlikes'). see load_exlikes()
# nulls were replaced by 0.0 (e.g., asset not provided a secondary probability)
# missing columns were replaced by 1.0 (e.g., non-failure events)
aep_ser,
event_rels=None, #ev calculation method
#WARNING: not necessarily the same as the parameter used by LikeSampler
#max: maximum expected value of impacts per asset from the duplicated events
#resolved damage = max(damage w/o fail, damage w/ fail * fail prob)
#default til 2020-12-30
#mutEx: assume each event is mutually exclusive (only one can happen)
#lower bound
#indep: assume each event is independent (failure of one does not influence the other)
#upper bound
logger=None,
):
"""
we accept multiple exposure sets for a single event
e.g. 'failure' raster and 'no fail'
where each exposure set is assigned a conditional probability in 'exlikes' (edf)
e.g. exlikes=1.0 means only one exposure set
for resolving conditional probabilities for a single exposure set:
see build.lisamp.LikeSampler.run()
(no impacts)
view(edf)
"""
#======================================================================
# defaults
#======================================================================
if logger is None: logger=self.logger
log = logger.getChild('ev_multis')
cplx_evn_d = self.cplx_evn_d #{aep: [eventName1, eventName2,...]}
if event_rels is None: event_rels = self.event_rels
#======================================================================
# precheck
#======================================================================
assert isinstance(cplx_evn_d, dict)
assert len(cplx_evn_d)>0
assert (edf.max(axis=1)<=1).all(), 'got probs exceeding 1'
assert (edf.min(axis=1)>=0).all(), 'got negative probs'
assert ddf.shape == edf.shape, 'shape mismatch'
"""where edf > 0 ddf should also be > 0
but leave this check for the input validator"""
#======================================================================
# get expected values of all damages
#======================================================================
""" skip this based on event_rels?"""
evdf = ddf*edf
log.info('resolving EV w/ %s, %i event sets, and event_rels=\'%s\''%(
str(evdf.shape), len(cplx_evn_d), event_rels))
assert not evdf.isna().any().any()
assert evdf.min(axis=1).min()>=0
#======================================================================
# loop by unique aep and resolve-----
#======================================================================
res_df = pd.DataFrame(index=evdf.index, columns = aep_ser.unique().tolist())
meta_d = dict()
#for indxr, aep in enumerate(aep_ser.unique().tolist()):
for indxr, (aep, exn_l) in enumerate(cplx_evn_d.items()):
self.exn_max = max(self.exn_max, len(exn_l)) #used on plot
#===================================================================
# setup
#===================================================================
self.feedback.setProgress((indxr/len(aep_ser.unique())*80))
assert isinstance(aep, float)
if not event_rels=='max':
if not (edf.loc[:, exn_l].sum(axis=1).round(self.prec) == 1.0).all():
raise Error('aep %.4f probabilities fail to sum'%aep)
log.debug('resolving aep %.4f w/ %i event names: %s'%(aep, len(exn_l), exn_l))
"""
view(self.att_df)
"""
#===================================================================
# simple events.. nothing to resolve----
#===================================================================
if len(exn_l) == 1:
"""
where hazard layer doesn't have a corresponding failure layer
"""
res_df.loc[:, aep] = evdf.loc[:, exn_l].iloc[:, 0]
meta_d[aep] = 'simple noFail'
"""no attribution modification required"""
#===================================================================
# one failure possibility-----
#===================================================================
elif len(exn_l) == 2:
if event_rels == 'max':
"""special legacy method... see below"""
res_df.loc[:, aep] = evdf.loc[:, exn_l].max(axis=1)
else:
"""where we only have one failure event
events are mutually exclusive by default"""
res_df.loc[:, aep] = evdf.loc[:, exn_l].sum(axis=1)
meta_d[aep] = '1 fail'
#===================================================================
# complex events (more than 2 failure event)----
#===================================================================
else:
"""
view(edf.loc[:, exn_l])
view(ddf.loc[:, exn_l])
view(evdf.loc[:, exn_l])
"""
log.info('resolving alternate damages for aep %.2e from %i events: \n %s'%(
aep, len(exn_l), exn_l))
#===============================================================
# max
#===============================================================
if event_rels == 'max':
"""
matching 2020 function
taking the max EV on each asset
where those rasters w/o exlikes P=1 (see load_exlikes())
WARNING: this violates probability logic
"""
res_df.loc[:, aep] = evdf.loc[:, exn_l].max(axis=1)
#===============================================================
# mutex
#===============================================================
elif event_rels == 'mutEx':
res_df.loc[:, aep] = evdf.loc[:, exn_l].sum(axis=1)
#===============================================================
# independent
#===============================================================
elif event_rels == 'indep':
"""
NOTE: this is a very slow routine
TODO: parallel processing
"""
#identify those worth calculating
bx = np.logical_and(
(edf.loc[:, exn_l]>0).sum(axis=1)>1, #with multiple real probabilities
ddf.loc[:,exn_l].sum(axis=1).round(self.prec)>0 #with some damages
)
#build the event type flags
etype_df = pd.Series(index=exn_l, dtype=bool, name='mutEx').to_frame()
#mark the failure event
etype_df.loc[etype_df.index.isin(self.noFailExn_d.values()), 'mutEx']=True
assert etype_df.iloc[:,0].sum()==1
"""todo: consider using 'apply'
tricky w/ multiple data frames...."""
log.info('aep %.4f calculating %i (of %i) EVs from %i events w/ independence'%(
aep, bx.sum(), len(bx), len(exn_l)))
#loop and resolve each asset
for cindx, pser in edf.loc[bx, exn_l].iterrows():
#assemble the prob/consq set for this asset
inde_df = pser.rename('prob').to_frame().join(
ddf.loc[cindx, exn_l].rename('consq').to_frame()
).join(etype_df)
#resolve for this asset
res_df.loc[cindx, aep] = self._get_indeEV(inde_df)
#fill in remainders
assert res_df.loc[~bx, aep].isna().all()
res_df.loc[~bx, aep] = evdf.loc[~bx, exn_l].max(axis=1)
else: raise Error('bad event_rels: %s'%event_rels)
#===============================================================
# wrap complex
#===============================================================
meta_d[aep] = 'complex fail'
#===================================================================
# wrap this aep
#===================================================================
if res_df[aep].isna().any():
raise Error('got nulls on %s'%aep)
#=======================================================================
# # check
#=======================================================================
assert res_df.min(axis=1).min()>=0
if not res_df.notna().all().all():
raise Error('got %i nulls'%res_df.isna().sum().sum())
#=======================================================================
# attribution------
#=======================================================================
if self.attriMode:
atr_dxcol_raw = self.att_df.copy()
mdex = atr_dxcol_raw.columns
nameRank_d= {lvlName:i for i, lvlName in enumerate(mdex.names)}
edf = edf.sort_index(axis=1, ascending=False)
if event_rels == 'max':
"""
turns out we need to get the ACTUAL expected value matrix
here we reconstruct by getting a max=1, no=0, shared=0.5 matrix
then multiplying that by the evdf to get the ACTUAL ev matrix"""
#===============================================================
# build multipler (boolean based on max)
#===============================================================
mbdxcol=None
for aep, gdf in atr_dxcol_raw.groupby(level=0, axis=1):
#get events on this aep
exn_l = gdf.columns.remove_unused_levels().levels[nameRank_d['rEventName']]
#identify maximums
booldf = evdf.loc[:, exn_l].isin(evdf.loc[:, exn_l].max(axis=1)).astype(int)
#handle duplicates (assign equal portion)
if len(exn_l)>1:
boolidx = booldf.eq(booldf.iloc[:,0], axis=0).all(axis=1)
booldf.loc[boolidx, :] = float(1/len(exn_l))
#add in the dummy lvl0 aep
bdxcol = pd.concat([booldf], keys=[aep], axis=1)
if mbdxcol is None:
mbdxcol = bdxcol
else:
mbdxcol = mbdxcol.merge(bdxcol, how='outer', left_index=True, right_index=True)
log.debug('%.4f: got %s'%(aep, str(mbdxcol.shape)))
#check it
self.check_attrimat(atr_dxcol=mbdxcol, logger=log)
#===============================================================
# apply multiplication
#===============================================================
#get EV from this
evdf1 = mbdxcol.multiply(evdf, axis='column', level=1).droplevel(level=0, axis=1)
elif event_rels=='mutEx':
evdf1=evdf
elif event_rels=='indep':
raise Error('attribution not implemented for event_rels=\'indep\'')
else: raise Error('bad event_rels')
#===================================================================
# common
#===================================================================
#multiply through to get all the expected value components
i_dxcol = atr_dxcol_raw.multiply(evdf1, axis='columns', level=1)
#divide by the event totals to get ratios back
atr_dxcol = i_dxcol.divide(res_df, axis='columns', level='aep')
#apportion null values
atr_dxcol = self._attriM_nulls(res_df, atr_dxcol, logger=log)
self.att_df = atr_dxcol
#======================================================================
# wrap
#======================================================================
#find those w/ zero fail
bx = res_df.max(axis=1)==0
log.info('resolved %i asset (%i w/ pfail=0) to %i unique event damages to \n %s'%(
len(bx), bx.sum(), len(res_df.columns), res_df.mean(axis=0).to_dict()))
return res_df.sort_index(axis=1)
def calc_ead(self, #get EAD from a set of impacts per event
df_raw, #xid: aep
ltail = None,
rtail = None,
drop_tails = None, #whether to remove the dummy tail values from results
dx = None, #damage step for integration (default:None)
logger = None
):
"""
#======================================================================
# inputs
#======================================================================
ltail: left tail treatment code (low prob high damage)
flat: extend the max damage to the zero probability event
extrapolate: extend the fucntion to the zero aep value (interp1d)
float: extend the function to this damage value (must be greater than max)
none: don't extend the tail (not recommended)
rtail: right trail treatment (high prob low damage)
extrapolate: extend the function to the zero damage value
float: extend the function to this aep
none: don't extend (not recommended)
"""
#======================================================================
# setups and defaults
#======================================================================
if logger is None: logger = self.logger
log = logger.getChild('calc_ead')
if ltail is None: ltail = self.ltail
if rtail is None: rtail = self.rtail
if drop_tails is None: drop_tails=self.drop_tails
assert isinstance(drop_tails, bool)
#format tail values
assert not ltail is None
assert not rtail is None
if not ltail in ['flat', 'extrapolate', 'none']:
try:
ltail = float(ltail)
except Exception as e:
raise Error('failed to convert \'ltail\'=\'%s\' to numeric \n %s'%(ltail, e))
if not rtail in ['extrapolate', 'none']:
rtail = float(rtail)
log.info('getting ead on %s w/ ltail=\'%s\' and rtail=\'%s\''%(
str(df_raw.shape), ltail, rtail))
#=======================================================================
# data prep-----
#=======================================================================
"""
view(df_raw)
"""
df = df_raw.copy().sort_index(axis=1, ascending=False)
#=======================================================================
# no value----
#=======================================================================
"""
this can happen for small inventories w/ no failure probs
"""
#identify columns to calc ead for
bx = (df > 0).any(axis=1) #only want those with some real damages
if not bx.any():
log.warning('%s got no positive damages %s'%(self.tag, str(df.shape)))
#apply dummy tails as 'flat'
if not ltail is None:
df.loc[:,0] = df.iloc[:,0]
if not rtail is None:
aep_val = max(df.columns.tolist())*(1+10**-(self.prec+2))
df[aep_val] = 0
#re-arrange columns so x is ascending
df = df.sort_index(ascending=False, axis=1)
#apply dummy ead
df['ead'] = 0
#=======================================================================
# some values---------
#=======================================================================
else:
#=======================================================================
# get tail values-----
#=======================================================================
self.check_eDmg(df, dropna=True, logger=log)
#======================================================================
# left tail
#======================================================================
#flat projection
if ltail == 'flat':
df.loc[:,0] = df.iloc[:,0]
if len(df)==1:
self.extrap_vals_d[0] = df.loc[:,0].mean().round(self.prec) #store for later
elif ltail == 'extrapolate': #DEFAULT
df.loc[bx,0] = df.loc[bx, :].apply(self._extrap_rCurve, axis=1, left=True)
#extrap value will be different for each entry
if len(df)==1:
self.extrap_vals_d[0] = df.loc[:,0].mean().round(self.prec) #store for later
elif isinstance(ltail, float):
"""this cant be a good idea...."""
df.loc[bx,0] = ltail
self.extrap_vals_d[0] = ltail #store for later
elif ltail == 'none':
pass
else:
raise Error('unexpected ltail key: %s'%ltail)
#======================================================================
# right tail
#======================================================================
if rtail == 'extrapolate':
"""just using the average for now...
could extrapolate for each asset but need an alternate method"""
aep_ser = df.loc[bx, :].apply(
self._extrap_rCurve, axis=1, left=False)
aep_val = round(aep_ser.mean(), 5)
assert aep_val > df.columns.max()
df.loc[bx, aep_val] = 0
log.info('using right intersection of aep= %.2e from average extrapolation'%(
aep_val))
self.extrap_vals_d[aep_val] = 0 #store for later
elif isinstance(rtail, float): #DEFAULT
aep_val = round(rtail, 5)
assert aep_val > df.columns.max(), 'passed rtail value (%.2f) not > max aep (%.2f)'%(
aep_val, df.columns.max())
df.loc[bx, aep_val] = 0
log.debug('setting ZeroDamage event from user passed \'rtail\' aep=%.7f'%(
aep_val))
self.extrap_vals_d[aep_val] = 0 #store for later
elif rtail == 'flat':
#set the zero damage year as the lowest year in the model (with a small buffer)
aep_val = max(df.columns.tolist())*(1+10**-(self.prec+2))
df.loc[bx, aep_val] = 0
log.info('rtail=\'flat\' setting ZeroDamage event as aep=%.7f'%aep_val)
elif rtail == 'none':
log.warning('no rtail extrapolation specified! leads to invalid integration bounds!')
else:
raise Error('unexpected rtail %s'%rtail)
#re-arrange columns so x is ascending
df = df.sort_index(ascending=False, axis=1)
#======================================================================
# check again
#======================================================================
self.check_eDmg(df, dropna=True, logger=log)
#======================================================================
# calc EAD-----------
#======================================================================
#get reasonable dx (integration step along damage axis)
"""todo: allow the user to set t his"""
if dx is None:
dx = df.max().max()/100
assert isinstance(dx, float)
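#A minimal sketch of the integration idea (an assumption for illustration;
#the project's _get_ev routine is not shown in this excerpt): expected annual
#damage is the area under the damage-exceedance curve, e.g. by trapezoids
#over ascending aep columns p0 < p1 with damages d0, d1:
# ead ~= sum(0.5*(d0 + d1)*(p1 - p0) for each consecutive column pair)
#the dx above (1% of the largest damage) would then set the resolution of an
#equivalent discretisation along the damage axis.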
#apply the ead func
df.loc[bx, 'ead'] = df.loc[bx, :].apply(
self._get_ev, axis=1, dx=dx)
df.loc[:, 'ead'] = df['ead'].fillna(0) #fill remainder w/ zeros
#======================================================================
# check it
#======================================================================
boolidx = df['ead'] < 0
if boolidx.any():
log.warning('got %i (of %i) negative eads'%( boolidx.sum(), len(boolidx)))
"""
df.columns.dtype
"""
#======================================================================
# clean results
#======================================================================
if drop_tails:
#just add the results values onto the raw
res_df = df_raw.sort_index(axis=1, ascending=False).join(df['ead']).round(self.prec)
else:
#take everything
res_df = df.round(self.prec)
#final check
"""nasty conversion because we use aep as a column name..."""
cdf = res_df.drop('ead', axis=1)
cdf.columns = cdf.columns.astype(float)
self.check_eDmg(cdf, dropna=True, logger=log)
return res_df
def _get_indeEV(self,
inde_df #prob, consq, mutual exclusivity flag for each exposure event
):
"""
get the expected value at an asset with
n>1 independent failure events (w/ probabilities)
and 1 noFail event
"""
#=======================================================================
# prechecks
#=======================================================================
#check the columns
miss_l = set(['prob', 'consq', 'mutEx']).symmetric_difference(inde_df.columns)
assert len(miss_l)==0
#=======================================================================
# failures---------
#=======================================================================
bxf = ~inde_df['mutEx']
#=======================================================================
# assemble complete scenario matrix
#=======================================================================
n = len(inde_df[bxf])
#build it
if n not in self.scen_ar_d:
scenFail_ar = np.array([i for i in itertools.product(['yes','no'], repeat=n)])
self.scen_ar_d[n] = copy.copy(scenFail_ar)
#retrieve pre-built
else:
scenFail_ar = copy.copy(self.scen_ar_d[n])
#=======================================================================
# probs
#=======================================================================
sFailP_df = pd.DataFrame(scenFail_ar, columns=inde_df[bxf].index)
#expand probabilities to match size
prob_ar = np.tile(inde_df.loc[bxf, 'prob'].to_frame().T.values, (len(sFailP_df), 1))
#swap in positives
sFailP_df = sFailP_df.where(
np.invert(sFailP_df=='yes'),
prob_ar, inplace=False)
#swap in negatives
sFailP_df = sFailP_df.where(
np.invert(sFailP_df=='no'),
1-prob_ar, inplace=False).astype(np.float64)
#combine
sFailP_df['pTotal'] = sFailP_df.prod(axis=1)
assert round(sFailP_df['pTotal'].sum(), self.prec)==1, inde_df
#=======================================================================
# consequences
#=======================================================================
sFailC_df = | pd.DataFrame(scenFail_ar, columns=inde_df[bxf].index) | pandas.DataFrame |
import json
import pandas as pd
try:
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
except ImportError:
from urllib2 import urlopen, URLError, HTTPError
def get_form(api_key, typeform_id, options=None):
typeform_url = "https://api.typeform.com/v1/form/"
typeform_url += str(typeform_id) + "?key=" + str(api_key)
filters = ['completed', 'limit', 'offset', 'order_by', 'order_by[]', 'since', 'token', 'until']
if options:
if not isinstance(options, dict):
raise TypeError("Options must be a dictionary!")
for key, value in options.items():
if key not in filters: continue
typeform_url += '&{0}={1}'.format(key, value)
try:
response = urlopen(typeform_url)
raw_data = response.read().decode('utf-8')
return json.loads(raw_data)
except HTTPError as e:
print("HTTPError: %s" % e.code)
except URLError as e:
print("URLError: %s" % e.reason)
except Exception:
import traceback
print("generic exception: {0}".format(traceback.format_exc()))
return {}
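# Example usage of get_form (the key and form id are placeholders, not real
# values; the options follow the filter names accepted above):
# form = get_form("YOUR_API_KEY", "abc123", options={"completed": "true", "limit": 50})
# print(len(form.get("questions", [])))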
def keyerror(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except KeyError:
print("Key not found!")
return wrapper
class Typeform:
def __init__(self, api_key):
self.api_key = api_key
def all_forms(self, format=None):
typeform_url = "https://api.typeform.com/v1/forms?key="
typeform_url += str(self.api_key)
api_response = urlopen(typeform_url)
raw_data = api_response.read().decode('utf-8')
json_data = json.loads(raw_data)
if format is list: return json_data
typeform_df = pd.DataFrame(json_data)
return typeform_df
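# Illustrative use of the wrapper class (hypothetical key and form id):
# tf = Typeform("YOUR_API_KEY")
# forms_df = tf.all_forms()
# answers_df = tf.answers("abc123")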
@keyerror
def answers(self, typeform_id, format=None, options=None):
typeform_responses = self.responses(typeform_id, options)
typeform_answers = [response['answers'] for response in typeform_responses if 'answers' in response]
typeform_answers = [answer for answer in typeform_answers if answer != {}]
if format is list: return typeform_answers
answers_df = pd.DataFrame(typeform_answers)
return answers_df
@keyerror
def questions(self, typeform_id, format=None, options=None):
api_response = get_form(self.api_key, typeform_id, options)
qs = api_response['questions']
if format is list: return qs
questions_df = | pd.DataFrame(qs) | pandas.DataFrame |
# Local
from sheetreader import get_index
# Internal
from collections import Counter, defaultdict
import json
import operator
import os
import re
from itertools import combinations, cycle
# External
from tabulate import tabulate
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
################################################################################
# Output
def write_json(object, filename):
"Write object to a JSON file."
with open(f'./Data/{filename}', 'w') as f:
json.dump(object, f, indent=2)
def write_table(rows, headers, filename):
"Write LaTeX table to a file."
table = tabulate(rows, headers=headers, tablefmt='latex_booktabs')
with open(f'./Tables/{filename}', 'w') as f:
f.write(table)
def write_frequency_table(counter, headers, filename):
"Write a frequency table to a file, in LaTeX format."
write_table(rows=counter.most_common(),
headers=headers,
filename=filename)
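# Example (illustration only; 'tasks.tex' is a made-up filename): dump a
# frequency table of the annotated system tasks to LaTeX.
# write_frequency_table(count_contents(index, 'system_task'),
#                       headers=['Task', 'Count'],
#                       filename='tasks.tex')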
################################################################################
# Statistics
# TODO:
# * Compute percentages to put in the text. This could be done using the counters
# that are already provided in the main function.
# * Compute min, max, most frequent scale size (key: op_instrument_size)
#
# DONE:
# * Confusion tables, showing which criteria get confused the most
# * Plot of how often authors specify criteria/definitions
# * Frequency tables:
# - Languages
# - Tasks
# - Output formats
# - Author-specified criteria
# - Criteria paraphrase
# - Languages
# - Statistics used
def unique_count_contents(index, key):
# Counts unique number of instances at a per-paper level:
unique_counter = Counter()
for paper, rows in index.items():
for row in rows:
value_counter = count_contents({paper: [row]}, key)
for value_key in value_counter.keys():
if value_key in unique_counter:
unique_counter[value_key] = unique_counter[value_key] + 1
else:
unique_counter[value_key] = 1
return unique_counter
def count_all_contents(index, key):
# Counts all instances for a given column key:
all_counter = Counter()
for paper, rows in index.items():
for row in rows:
value_counter = count_contents({paper: [row]}, key, True)
for value_key, count in value_counter.items():
if value_key in all_counter:
all_counter[value_key] = all_counter[value_key] + count
else:
all_counter[value_key] = count
return all_counter
def count_contents(index, key, count_blank=False):
# Counts the number of times that values occur in a particular column:
value_counter = Counter()
for paper, rows in index.items():
value = rows[0][key]
value = value.lower()
if 'Multiple' in value or 'multiple' in value:
if key != "system_output":
value = " ".join(value.split(":")[1:])
items = value.strip().split(', ')
value_counter.update([item.strip() for item in items])
elif key == "system_output":
value = value.replace("multiple (list all):", "").strip()
items = value.strip().split(', ')
value_counter.update([item.strip() for item in items])
elif value not in {"", "none given", "not given", "blank"}:
value = value.strip()
value_counter[value] += 1
elif value in {"", "none given", "not given", "blank"} and count_blank:
value = "None Given/Blank"
value_counter[value] += 1
return value_counter
def count_contents_paraphase(index, key):
# Counts the number of times that values occur in a particular column:
value_counter = Counter()
for paper, rows in index.items():
value = rows[0][key]
value = value.strip()
if 'Multiple' in value or 'multiple' in value:
value = " ".join(value.split(":")[1:])
value = value.replace("-", "").strip()
value = re.sub(r'(\d*\/*\d*\w\.) ', ' ', value)
items = value.strip().split(', ')
value_counter.update([item.strip() for item in items])
elif value != "":
value = value.replace("-", "").strip()
value = re.sub(r'(\d*\/*\d*\w\.) ', ' ', value)
value = value.strip()
value_counter[value] += 1
return value_counter
def count_statistic(index, key):
# Counts the number of times that values occur in a particular column:
value_counter = Counter()
for paper, rows in index.items():
value = rows[0][key]
value = value.lower()
if value != "" and value != "none given":
split_crit = ""
if "," in value:
split_crit = ","
else:
split_crit = ";"
items = value.split(split_crit)
for item in items:
value_counter[item.strip()] += 1
return value_counter
stats_normalisation_dict = {"SD": "standard deviation",
"standard dev": "standard deviation",
"Mean": "mean",
"means": "mean",
"raw counts": "raw numbers"}
def normalise_statistics_terms(term: str):
if term in stats_normalisation_dict:
return [stats_normalisation_dict[term]]
if "ANOVA" in term:
return ["ANOVA"]
if "Kruskal-Wallis" in term:
return ["Kruskall-Wallis"]
if re.match("[Aa]nalysis [Oo]f [Vv]ariance", term):
return ["ANOVA"]
if re.match("Mann-Whitney.*U", term):
return ["Mann-Whitney U-test"]
if re.match("[Cc]hi[\- ]sq", term):
return ["Chi-squared"]
if re.match("[Rr]atio", term):
return ['ratio']
if "percentage" in term:
return ["proportion"]
if "specif" in term:
return ['underspecified']
if term.startswith("t-test"):
return ['t-test']
return [term]
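# Examples of what the rules above produce (derived from this function,
# shown as doctest-style comments):
# >>> normalise_statistics_terms("SD")
# ['standard deviation']
# >>> normalise_statistics_terms("Analysis of Variance")
# ['ANOVA']
# >>> normalise_statistics_terms("chi-square test")
# ['Chi-squared']
# >>> normalise_statistics_terms("bootstrap")
# ['bootstrap']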
def count_statistic_modified(index, key):
# Counts the number of times that values occur in a particular column:
value_counter = Counter()
for paper, rows in index.items():
value = rows[0][key]
if value != "" and value != "none given":
split_crit = ""
if "," in value:
split_crit = ","
else:
split_crit = ";"
value = re.sub("(2|two|Two)-tail(ed)*", "two-tailed", value)
items = value.split(split_crit)
for item in items:
normalised = normalise_statistics_terms(item.strip())
for x in normalised:
value_counter[x] += 1
return value_counter
def convert_percent(counter_values):
# Converts the values of the counter into percentages:
counts = list(counter_values.values())
sum_counts = sum(counts)
percent_counter = Counter()
for k, v in counter_values.items():
if k not in percent_counter:
percent_counter[k] = round((v / sum_counts) * 100, 2)
return percent_counter
def count_empty(index, key):
"""
Count for any key how many papers leave the value unspecified.
Note that this function only checks the first of the rows corresponding to a paper.
"""
count = 0
for paper, rows in index.items():
value = rows[0][key]
if value in {"", "none given", "not given", "blank", "unclear"}:
count += 1
return count
def year_wise_counts(index, key, filename):
"""
Calculates year-wise counts (before and after 2010) and writes them to a latex file
Inputs -
Index - excel file
Key - column which needs to be calculated
filename - filename of the tex file
Outputs:
A latex file of the counts containing the criterion, before 2010, after 2010, total values
"""
b4_value_counter = Counter()
after_value_counter = Counter()
for paper, rows in index.items():
value = rows[0][key]
value = value.lower()
year = rows[0]["pub_year"]
if ('Multiple' in value or 'multiple' in value) and key != "system_output":
value = " ".join(value.split(":")[1:])
items = value.strip().split(', ')
if year < 2010:
b4_value_counter.update([item.strip() for item in items])
else:
after_value_counter.update([item.strip() for item in items])
elif value != "":
if year < 2010:
b4_value_counter[value] += 1
else:
after_value_counter[value] += 1
rows = []
languages = list(set(list(b4_value_counter.keys()) + list(after_value_counter.keys())))
for lang in languages:
if lang in b4_value_counter.keys():
b4_value = b4_value_counter[lang]
else:
b4_value = 0
if lang in after_value_counter.keys():
after_value = after_value_counter[lang]
else:
after_value = 0
rows.append([lang, b4_value, after_value, b4_value + after_value])
rows.sort(key=lambda k: k[3], reverse=True)
headers = ['Criterion', 'Before 2010', 'After 2010', "Total"]
write_table(rows, headers, filename)
def year_wise_verbatim_def_counts(index, filename):
"""
Calculates year-wise counts (before and after 2010) of the verbatim criterion and writes them to a latex file
Inputs -
Index - excel file
filename - filename of the tex file
Outputs:
A latex file of the counts containing the criterion, before 2010, after 2010, total values
"""
b4_value_counter = Counter()
after_value_counter = Counter()
for paper, rows in index.items():
for row in rows:
verbatim = row["criterion_verbatim"].lower().strip()
definition = row["criterion_definition_verbatim"].lower().strip()
year = row["pub_year"]
if verbatim != "" and verbatim != "none given" and verbatim != "not given":
if definition != "" and definition != "none given" and definition != "not given":
if year < 2010:
if verbatim in b4_value_counter.keys():
b4_value_counter[verbatim] = b4_value_counter[verbatim] + 1
else:
b4_value_counter[verbatim] = 1
else:
if verbatim in after_value_counter.keys():
after_value_counter[verbatim] = after_value_counter[verbatim] + 1
else:
after_value_counter[verbatim] = 1
rows = []
all_criterion = list(set(list(after_value_counter.keys()) + list(b4_value_counter.keys())))
for crit in all_criterion:
if crit in b4_value_counter.keys():
b4_value = b4_value_counter[crit]
else:
b4_value = 0
if crit in after_value_counter.keys():
after_value = after_value_counter[crit]
else:
after_value = 0
rows.append([crit, b4_value, after_value, b4_value + after_value])
rows.sort(key=lambda k: k[3], reverse=True)
headers = ['Criterion', 'Before 2010', 'After 2010', "Total"]
write_table(rows, headers, filename)
def count_verbatim_criterion(index, filename):
"""
Calculates counts of the verbatim criterion and whether an associated definition was provided or not
Inputs -
Index - excel file
filename - filename of the tex file
Outputs:
A latex file of the counts containing the criterion, Definitions given, Definitions not given
"""
criterion_given = dict()
criterion_no_given = dict()
for paper, rows in index.items():
for row in rows:
verbatim = row["criterion_verbatim"]
definition = row["criterion_definition_verbatim"]
if verbatim not in {"", "none given", "not given"}:
if definition not in {"", "none given", "not given"}:
if verbatim in criterion_given.keys():
criterion_given[verbatim] = criterion_given[verbatim] + 1
else:
criterion_given[verbatim] = 1
else:
if verbatim in criterion_no_given.keys():
criterion_no_given[verbatim] = criterion_no_given[verbatim] + 1
else:
criterion_no_given[verbatim] = 1
all_criterion = list(set(list(criterion_given.keys()) + list(criterion_no_given.keys())))
rows = []
for crit in all_criterion:
if crit in criterion_given.keys():
given = criterion_given[crit]
else:
given = 0
if crit in criterion_no_given.keys():
not_given = criterion_no_given[crit]
else:
not_given = 0
rows.append([crit, given, not_given])
headers = ['Criterion', 'Definitions \n Given', 'Definitions \n Not Given']
write_table(rows, headers, filename)
def count_verbatim_definiton(index):
"""
Calculates counts of whether a verbatim criterion definition was provided or not, grouped by publication year (before vs. after 2010).
Inputs -
Index - excel file
Outputs:
Printed counts of definitions given / not given, split into pre-2010, post-2010 and totals.
"""
given_b4_2010 = 0
given_after_2010 = 0
not_given_b4_2010 = 0
not_given_after_2010 = 0
for paper, rows in index.items():
for row in rows:
year = row["pub_year"]
definition = row["criterion_definition_verbatim"]
if definition != "" and definition != "not given" and definition != "blank" and definition != "unclear" and definition != "none given":
if year < 2010:
given_b4_2010 += 1
else:
given_after_2010 += 1
else:
if year < 2010:
not_given_b4_2010 += 1
else:
not_given_after_2010 += 1
print("Num of times Verbatim Definition for a criteria is given (pre 2010): {}".format(given_b4_2010))
print("Num of times Verbatim Definition for a criteria is given (post 2010): {}".format(given_after_2010))
print(
"Num of times Verbatim Definition for a criteria is given (Total): {}".format(given_after_2010 + given_b4_2010))
print("------------------------------ \n")
print("Num of times Verbatim Definition for a criteria is not given (pre 2010): {}".format(not_given_b4_2010))
print("Num of times Verbatim Definition for a criteria is not given (post 2010): {}".format(not_given_after_2010))
print("Num of times Verbatim Definition for a criteria is not given (Total): {}".format(
not_given_b4_2010 + not_given_after_2010))
################################################################################
# task to criterion
def task_2_criterion(index, task_counter, filename):
"""
Calculates counts of task and its associated criterion
Inputs -
Index - excel file
task_counter - A counter of task counts
filename - filename of the tex file
Outputs:
A latex file of the counts containing task, verbatim criterion and count.
"""
task2criterion = dict()
for paper, rows in index.items():
for row in rows:
task = row["system_task"]
verbatim = row["criterion_verbatim"]
task = task.replace("multiple (list all): ", "").strip()
if verbatim != "" and verbatim != "none given" and verbatim != "not given":
verbatim = verbatim.lower().strip()
if task in task2criterion:
criterion_values = task2criterion[task]
if verbatim in criterion_values:
criterion_values[verbatim] = criterion_values[verbatim] + 1
else:
criterion_values[verbatim] = 1
task2criterion[task] = criterion_values
else:
criterion_values = {}
criterion_values[verbatim] = 1
task2criterion[task] = criterion_values
rows = []
for key, value in task_counter.most_common():
if key in task2criterion.keys():
v = task2criterion[key]
sorted_values = dict(sorted(v.items(), key=operator.itemgetter(1), reverse=True))
for crit, count in sorted_values.items():
rows.append([key, crit, count])
headers = ['Task', 'Verbatim Criterion', 'Count']
write_table(rows, headers, filename)
df = pd.DataFrame(rows, columns=['Task', 'Verbatim Criterion', 'Count'])
df.to_excel("./Data/task2criterion.xlsx", index=False)
def task_2_criterion_standardized(index, task_counter, filename):
"""
Calculates counts of task and its associated criterion (standardized)
Inputs -
Index - excel file
task_counter - A counter of task counts
filename - filename of the tex file
Outputs:
A latex file of the counts containing task, verbatim criterion (Standardized) and count.
"""
task2criterion = dict()
for paper, rows in index.items():
for row in rows:
task = row["system_task"]
verbatim = row["criterion_paraphrase"]
task = task.replace("multiple (list all): ", "").strip()
verbatim = verbatim.replace("-", "").strip()
verbatim = re.sub(r'(\d*\/*\d*\w\.) ', ' ', verbatim)
verbatim = verbatim.replace(".", "").strip()
verbatim = verbatim.replace("Multiple (list all):", "").strip()
if "," in verbatim:
split_verbatim = verbatim.split(",")
else:
split_verbatim = [verbatim]
for verb in split_verbatim:
if verb != "" and verb != "none given" and verb != "not given":
verb = verb.lower().strip()
if task in task2criterion:
criterion_values = task2criterion[task]
if verb in criterion_values:
criterion_values[verb] = criterion_values[verb] + 1
else:
criterion_values[verb] = 1
task2criterion[task] = criterion_values
else:
criterion_values = {}
criterion_values[verb] = 1
task2criterion[task] = criterion_values
rows = []
for key, value in task_counter.most_common():
if key in task2criterion.keys():
v = task2criterion[key]
sorted_values = dict(sorted(v.items(), key=operator.itemgetter(1), reverse=True))
for crit, count in sorted_values.items():
rows.append([key, crit, count])
headers = ['Task', 'Verbatim Criterion', 'Count']
write_table(rows, headers, filename)
df = | pd.DataFrame(rows, columns=['Task', 'Verbatim Criterion', 'Count']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime
import requests
import json
import sqlite3
from tqdm import tqdm
fr_grade = {13:'3a',
21:'4a',
23:'4b',
25:'4c',
29:'5a',
31:'5b',
33:'5c',
36:'6a',
38:'6a+',
40:'6b',
42:'6b+',
44:'6c',
46:'6c+',
49:'7a',
51:'7a+',
53:'7b',
55:'7b+',
57:'7c',
59:'7c+',
62:'8a',
64:'8a+',
66:'8b',
68:'8b+',
70:'8c',
72:'8c+'}
yds_grade ={13:'4',
21:'5',
23:'6',
25:'7',
29:'8',
31:'9',
33:'10a',
36:'10b',
38:'10c',
40:'10d',
42:'11a',
44:'11b',
46:'11c',
49:'11d',
51:'12a',
53:'12b',
55:'12c',
57:'12d',
59:'13a',
62:'13b',
64:'13c',
66:'13d',
68:'14a',
70:'14b',
72:'14c',
74:'14d',
75:'15a'}
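# Both lookup tables share the same internal numeric grade ids, so one id
# maps to equivalent French and YDS grades (yds_grade carries a few extra
# ids at the top end), e.g.:
# >>> fr_grade[49], yds_grade[49]
# ('7a', '11d')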
def extract_recent_past(df, date, colname='date', time_range=5, units='days'):
trange = pd.Timedelta(value=time_range, unit=units)
return df.loc[(df[colname] < date) & (df[colname] >= date - trange)]
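# Example call (weather_df is assumed to have a datetime-like 'date' column,
# as the default colname suggests): grab the five days before a send.
# recent = extract_recent_past(weather_df, pd.Timestamp("2019-06-15"), time_range=5, units="days")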
def build_train_array(weather_df, ascent_df, time_range=12, features=['date','prcp','snow','tmax','tmin']):
if 'date' in features:
feat_per_entry = len(features)-1
else:
feat_per_entry = len(features)
feature_array = np.empty(shape=(len(ascent_df),feat_per_entry*time_range))
feature_array[:] = np.nan
ascents = ascent_df.values
for idx, row in zip(tqdm(ascent_df.index),range(len(ascent_df))):
recent_past = extract_recent_past(weather_df, \
pd.to_datetime(idx),\
time_range=time_range)
recent_past = recent_past[['prcp','snow','tmax','tmin']]
rp_vals = recent_past.stack().values
if rp_vals.size > 0: #guard against empty weather windows
try: feature_array[row,:] = rp_vals
except Exception: print('Oops on row',row)
#feature_array[row,:] = recent_past.stack().values
#print(row,recent_past.stack().values)
return feature_array, ascents
def chop_nans(x,y):
return x[~np.isnan(x).any(axis=1)], y[~np.isnan(x).any(axis=1)]
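# Typical pairing with the builders above: rows whose weather window was
# incomplete come back as NaN and get dropped from both arrays together.
# x, y = build_train_array(weather_df, ascent_df, time_range=12)
# x, y = chop_nans(x, y)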
def build_sendless_array(weather_df, ascent_df, time_range=12, features=['date','prcp','snow','tmax','tmin']):
if 'date' in features:
feat_per_entry = len(features)-1
else:
feat_per_entry = len(features)
feature_array = np.empty(shape=(len(ascent_df),feat_per_entry*time_range))
feature_array[:,:] = np.nan
ascents = ascent_df.values
for idx, row in zip(ascent_df.index,range(len(ascent_df))):
recent_past = extract_recent_past(weather_df, \
pd.to_datetime(idx),\
time_range=time_range)
recent_past = recent_past[['prcp','snow','tmax','tmin']]
rp_vals = recent_past.stack().values
if rp_vals.size > 0: #guard against empty weather windows
try: feature_array[row,:] = rp_vals
except Exception: print('Oops on row',row)
#feature_array[row,:] = recent_past.stack().values
#print(row,recent_past.stack().values)
return feature_array, ascents
def gen_new_date(weather_df, weekday_prob):
# draw a day of the week
weekday = np.random.choice([0,1,2,3,4,5,6], p = weekday_prob.sort_index(axis=0).values)
return weather_df[pd.to_datetime(weather_df.date).dt.weekday == weekday].sample(n=1).date
def gen_sendfree_list(weather_df, ascent_df, weekday_prob, num=2800):
#nosend_dates = np.empty(shape=[len(ascent_df),], dtype='datetime64')
nosend_dates = pd.DataFrame(data = None,
columns = None,
dtype = 'object')
#for idx in tqdm(range(len(ascent_df))):
for idx in tqdm(range(num)):
new_date = gen_new_date(weather_df, weekday_prob)
while pd.to_datetime(new_date.array)[0] in pd.to_datetime(ascent_df.index):
#print(new_date)
new_date = gen_new_date(weather_df, weekday_prob)
#nosend_dates.iloc[idx] = new_date.array.date
nosend_dates = nosend_dates.append({'date':str(pd.to_datetime(new_date.values[0]).date())},ignore_index=True)
newdf = pd.Series(0, index=pd.Index(nosend_dates['date']))
return newdf
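# The sampled dates serve as negative (no-send) examples whose weekday mix
# mirrors the real ascent log; weekday_prob is assumed to be a 7-entry
# Series of weekday probabilities summing to 1 (as gen_new_date requires).
# nosends = gen_sendfree_list(weather_df, ascent_df, weekday_prob, num=500)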
def qry(q, connection=None):
#open a fresh connection unless the caller supplies one
owns = connection is None
if owns: connection = sqlite3.connect("./db1.sqlite")
df = pd.read_sql_query(q, connection)
if owns: connection.close()
return df
def get_weather_multiyear(station_id, start_date, end_date, token, limit=1000, units='standard', datasetid='GHCND',**kwargs):
start_date = | pd.to_datetime(start_date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
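# Note added for illustration: levels/labels encode each tuple positionally.
# With the levels above, labels ([0, 0, 1, 2, 3, 3], [0, 1, 0, 1, 0, 1])
# spell out ('foo','one'), ('foo','two'), ('bar','one'), ('baz','two'),
# ('qux','one'), ('qux','two'); newer pandas renames `labels` to `codes`.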
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME data types changes to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
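# check() builds a MultiIndex with `nlevels` levels of 500 codes plus one
# binary level, asserts it has no duplicates, then injects a single duplicated
# row (a duplicated null row when with_nulls=True) and asserts the duplicate
# is detected.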
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
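# duplicated() on the MultiIndex should agree with duplicate detection on the
# materialized tuple values via the hashtable implementation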
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
import sklearn
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score, precision_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import roc_auc_score
import os
import optuna
import random
import cvxopt
import cvxopt.solvers
cvxopt.solvers.options['show_progress'] = False
def get_label(type=0):
y = pd.read_csv('data/Ytr.csv',sep=',',index_col=0)
if type == 0:
y = y.Bound.values
return y
else:
y['Bound'] = y.Bound.apply(lambda x: -1 if x == 0 else 1)
y = y.Bound.values
return y
def get_train_test(X,y,p):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=p, random_state=42)
print('Data Shapes:', X_train.shape, X_test.shape, y_train.shape, y_test.shape)
return X_train, X_test, y_train, y_test
def getKmers(sequence, size=3):
return [sequence[x:x+size].lower() for x in range(len(sequence) - size + 1)]
def base2int(c):
return {'a':0,'c':1,'g':2,'t':3}.get(c,0)
def index(kmer):
base_idx = np.array([base2int(base) for base in kmer])
multiplier = 4** np.arange(len(kmer))
kmer_idx = multiplier.dot(base_idx)
return kmer_idx
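# Spectrum-style embedding: index() maps a k-mer to a unique integer in
# [0, 4**k) by reading its bases as base-4 digits (a=0, c=1, g=2, t=3), and
# spectral_embedding() below counts how often each k-mer occurs, giving a
# fixed-length count vector of size 4**k per sequence.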
def spectral_embedding(sequence,kmer_size=3):
kmers = getKmers(sequence,kmer_size)
kmer_idxs = [index(kmer) for kmer in kmers]
one_hot_vector = np.zeros(4**kmer_size)
for kmer_idx in kmer_idxs:
one_hot_vector[kmer_idx] += 1
return one_hot_vector
def get_data(kmer_size):
data = pd.DataFrame(pd.concat([X_train_.seq,X_test_.seq],axis=0))
train_text = data.seq.values
kmer_data = []
for i in train_text:
kmer_data.append(spectral_embedding(i,kmer_size=kmer_size))
return np.array(kmer_data)
def sigma_from_median(X):
pairwise_diff = X[:, :, None] - X[:, :, None].T
pairwise_diff *= pairwise_diff
euclidean_dist = np.sqrt(pairwise_diff.sum(axis=1))
return np.median(euclidean_dist)
def gaussian_kernel(x, y, sigma=5.0):
return np.exp(-np.linalg.norm(x-y)**2 / (2 * (sigma ** 2)))
def linear_kernel(x1, x2):
return np.dot(x1, x2.T)
def polynomial_kernel(X1, X2, power=2):
return np.power((1 + linear_kernel(X1, X2)),power)
def rbf_kernel(X1, X2, sigma=10):
X2_norm = np.sum(X2 ** 2, axis = -1)
X1_norm = np.sum(X1 ** 2, axis = -1)
gamma = 1 / (2 * sigma ** 2)
K = np.exp(- gamma * (X1_norm[:, None] + X2_norm[None, :] - 2 * np.dot(X1, X2.T)))
return K
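# rbf_kernel above uses the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y
# to compute the full Gram matrix K[i, j] = exp(-||X1[i] - X2[j]||^2 / (2 sigma^2))
# in one vectorized step, without an explicit double loop.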
class KernelMethodBase(object):
kernels_ = {
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'rbf': rbf_kernel,
'gaussian':gaussian_kernel
}
def __init__(self, kernel='linear', **kwargs):
self.kernel_name = kernel
self.kernel_function_ = self.kernels_[kernel]
self.kernel_parameters = self.get_kernel_parameters(**kwargs)
def get_kernel_parameters(self, **kwargs):
params = {}
if self.kernel_name == 'rbf' or self.kernel_name == 'gaussian':
params['sigma'] = kwargs.get('sigma', None)
if self.kernel_name == 'polynomial':
params['power'] = kwargs.get('power', None)
return params
def fit(self, X, y, **kwargs):
return self
def decision_function(self, X):
pass
def predict(self, X):
pass
class KernelRidgeRegression(KernelMethodBase):
def __init__(self, lambd=0.1, **kwargs):
self.lambd = lambd
super(KernelRidgeRegression, self).__init__(**kwargs)
def fit(self, X, y, sample_weights=None):
n, p = X.shape
assert (n == len(y))
self.X_train = X
self.y_train = y
if sample_weights is not None:
w_sqrt = np.sqrt(sample_weights)
self.X_train = self.X_train * w_sqrt[:, None]
self.y_train = self.y_train * w_sqrt
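# Dual solution of kernel ridge regression: alpha = (K + n * lambda * I)^{-1} y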
A = self.kernel_function_(X,X,**self.kernel_parameters)
A[np.diag_indices_from(A)] = np.add(A[np.diag_indices_from(A)],n*self.lambd)
self.alpha = np.linalg.solve(A , self.y_train)
return self
def decision_function(self, X):
K_x = self.kernel_function_(X,self.X_train, **self.kernel_parameters)
return K_x.dot(self.alpha)
def predict(self, X):
return self.decision_function(X)
class KernelSVM(KernelMethodBase):
def __init__(self, C=0.1, **kwargs):
self.C = C
# Python 3: replace the following line by
# super().__init__(**kwargs)
super(KernelSVM, self).__init__(**kwargs)
def cvxopt_qp(self,P, q, G, h, A, b):
P = .5 * (P + P.T)
cvx_matrices = [
cvxopt.matrix(M) if M is not None else None for M in [P, q, G, h, A, b]
]
#cvxopt.solvers.options['show_progress'] = False
solution = cvxopt.solvers.qp(*cvx_matrices, options={'show_progress': False})
return np.array(solution['x']).flatten()
def svm_dual_soft_to_qp_kernel(self,K, y, C=1):
n = K.shape[0]
assert (len(y) == n)
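# Map the soft-margin SVM dual (maximize sum(a) - 1/2 a^T diag(y) K diag(y) a
# subject to 0 <= a_i <= C and y^T a = 0) onto cvxopt's standard QP form
# minimize 1/2 x^T P x + q^T x subject to G x <= h, A x = b, where
# G = [-I; I] and h = [0; C*1] encode the box constraint and A = y^T, b = 0
# encode the equality constraint.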
# Dual formulation, soft margin
# P = np.diag(y) @ K @ np.diag(y)
P = np.diag(y).dot(K).dot(np.diag(y))
# As a regularization, we add epsilon * identity to P
eps = 1e-12
P += eps * np.eye(n)
q = - np.ones(n)
G = np.vstack([-np.eye(n), np.eye(n)])
h = np.hstack([np.zeros(n), C * np.ones(n)])
A = y[np.newaxis, :]
b = np.array([0.])
return P, q, G, h, A.astype(float), b
def fit(self, X, y, tol=1e-8):
n, p = X.shape
assert (n == len(y))
self.X_train = X
self.y_train = y
# Kernel matrix
K = self.kernel_function_(
self.X_train, self.X_train, **self.kernel_parameters)
# Solve dual problem
self.alpha = self.cvxopt_qp(*self.svm_dual_soft_to_qp_kernel(K, y, C=self.C))
# Compute support vectors and bias b
sv = np.logical_and((self.alpha > tol), (self.C - self.alpha > tol))
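# Margin support vectors satisfy 0 < alpha_i < C, and for them
# y_i = K_i.(alpha * y) + b, so b = y_i - K_i.(alpha * y); averaging over all
# margin support vectors gives a more stable bias estimate.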
self.bias = y[sv] - K[sv].dot(self.alpha * y)
self.bias = self.bias.mean()
self.support_vector_indices = np.nonzero(sv)[0]
return self
def decision_function(self, X):
K_x = self.kernel_function_(X, self.X_train, **self.kernel_parameters)
return K_x.dot(self.alpha * self.y_train) + self.bias
def predict(self, X):
return np.sign(self.decision_function(X))
def cross_validate(x_data,y_data,model_name,lr=None,kernel=None,lambd=0.2,C=3,sigma=0.5,k=5,power=2):
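# Simple k-fold cross-validation: the data is split into k equal blocks, each
# block is held out once as the test fold while the model is fit on the
# remaining k-1 blocks, and the mean fold accuracy is returned.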
if len(x_data)%k != 0:
print("can't vsplit", len(x_data), 'by', k)
return
x_data_splitted = np.vsplit(x_data,k)
y_data_splitted = np.vsplit(y_data.reshape(-1,1),k)
aggregate_result = []
for i in range(len(x_data_splitted)):
train = []
test = []
items = [j for j in range(len(x_data_splitted)) if j !=i ]
x_test = x_data_splitted[i]
y_test = y_data_splitted[i]
for item in items:
if len(train) == 0:
x_train = x_data_splitted[item]
y_train = y_data_splitted[item]
else:
x_train = np.concatenate((x_train,x_data_splitted[item]), axis=0)
y_train = np.concatenate((y_train,y_data_splitted[item]), axis=0)
if model_name == 'KernelRidgeRegression':
model = KernelRidgeRegression(
kernel=kernel,
lambd=lambd,
sigma=sigma,
power=power
).fit(x_train, y_train)
result = sum(np.sign(model.predict(x_test)) == y_test) / len(y_test)  # alternative metric: roc_auc_score(np.sign(model.predict(x_test)), y_test)
elif model_name == 'KernelSVM':
model = KernelSVM(C=C,
kernel=kernel,
lambd=lambd,
sigma=sigma,
power=power)
model.fit(x_train, y_train.flatten())
y_pred = model.predict(x_test)
result = sum((y_pred.flatten()==y_test.flatten()))/len(y_test)
else:
print('wrong model_name')
return 0
aggregate_result.append(result)
value = sum(aggregate_result)/len(aggregate_result)
return value
def main():
np.random.seed(42)
random.seed(42)
# Train on the 2000 labeled sequences, holding out 33% for validation
X_train, X_test, y_train, y_test = get_train_test(get_data(8)[:2000,:],get_label(type=-1),0.33)
# model = KernelRidgeRegression(
# kernel='linear',
# lambd=0.688381
# ).fit(X_train, y_train)
# We don't need to worry about parameters that don't apply to a given kernel because the cross_validate helper handles them
# model = KernelRidgeRegression(
# kernel='polynomial',
# lambd=1.418356,
# sigma=2,
# power=2,
# C= 27.187947
# ).fit(X_train, y_train)
model = KernelSVM(C= 4.202029033820121,
kernel='rbf',
sigma=15.988389521578528)
model.fit(X_train, y_train.flatten())
# Use np.sign so the same accuracy computation works for both the ridge-regression and SVM outputs
result = sum(np.sign(model.predict(X_test).flatten())==y_test.flatten())/len(y_test.flatten())
cv_result = cross_validate(get_data(8)[:2000,:],get_label(type=-1),
model_name='KernelSVM',
C= 4.202029033820121,
kernel='rbf',
sigma=15.988389521578528,
k=4)
# print('Accuracy on 70-30 Split {}'.format(result))
print('Accuracy on Cross Validation {}'.format(cv_result))
X_test_final = np.sign(model.predict(get_data(8)[2000:,:]))
submission = []
for i in range(len(X_test_final)):
r1 = X_test_final[i]
if r1 == 1:
submission.append([i, int(r1)])
elif r1 == -1:
submission.append([i, 0])
else:
print('problem')
df = pd.DataFrame(submission)
df.columns = ['Id','Bound']
df.to_csv('cv_'+str(cv_result)+'_.csv',index=False)
if __name__ == "__main__":
X_test_ = pd.read_csv('data/Xte.csv',sep=',',index_col=0)
X_train_ = pd.read_csv('data/Xtr.csv',sep=',',index_col=0)
X_test_mat100 = pd.read_csv('data/Xte_mat100.csv',sep=' ',header=None)
import os
import time
import pandas as pd
import numpy as np
import json
from hydroDL import kPath
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.post import axplot, figplot
import matplotlib.pyplot as plt
dirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')
fileSiteNo = os.path.join(dirInv, 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
tabG = gageII.readData(
varLst=['NWIS_DRAIN_SQKM', 'BASIN_BOUNDARY_CONFIDENCE'], siteNoLst=siteNoLstAll)
# read NTN
dirNTN = os.path.join(kPath.dirData, 'EPA', 'NTN')
fileData = os.path.join(dirNTN, 'NTN-All-w.csv')
fileSite = os.path.join(dirNTN, 'NTNsites.csv')
ntnData = pd.read_csv(fileData)
ntnSite = pd.read_csv(fileSite)
ntnData['siteID'] = ntnData['siteID'].apply(lambda x: x.upper())
ntnData = ntnData.replace(-9, np.nan)
ntnIdLst = ntnData['siteID'].unique().tolist()
crdNTN = pd.read_csv(os.path.join(dirNTN, 'crdNTN.csv'), index_col='siteid')
crdNTN = crdNTN.drop(['CO83', 'NC30', 'WI19'])
crdUSGS = pd.read_csv(os.path.join(
dirNTN, 'crdUSGS.csv'), dtype={'STAID': str})
crdUSGS = crdUSGS.set_index('STAID')
t = pd.date_range(start='1979-01-01', end='2019-12-31', freq='W-TUE')
t = t[1:]
# varC = usgs.varC
varC = ['00940']
varNtn = ['Cl', 'subppt']
# siteNoLst = ['0422026250', '04232050', '0423205010']
siteNo = '04193500'
# siteNo = '01184000'
siteNoLstAll.index(siteNo)
# find NTN sites
usgsId = siteNo
x = crdUSGS.loc[usgsId]['x']
y = crdUSGS.loc[usgsId]['y']
dist = np.sqrt((x-crdNTN['x'])**2+(y-crdNTN['y'])**2)
dist = dist.drop(dist[dist > 500*1000].index)
data = np.full([len(t), len(varNtn)], np.nan)
distOut = np.full(len(t), np.nan)
idOut = np.full(len(t), np.nan, dtype=object)
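# Descriptive note: the loop below fills gaps by walking outward to the next-nearest NTN
# site, copying its weekly values only into entries that are still NaN and recording which
# site (and distance) supplied each filled week, until every week is covered or no candidate
# sites remain within the 500 km search radius.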
while len(dist) > 0:
ntnId = dist.idxmin()
# temp = dictNTN[ntnId].values
tab = ntnData[ntnData['siteID'] == ntnId]
tab.index = pd.to_datetime(tab['dateoff'])
out = pd.DataFrame(index=t)
tol = pd.Timedelta(3, 'D')
out = pd.merge_asof(left=out, right=tab, right_index=True,
left_index=True, direction='nearest', tolerance=tol)
temp = out[varNtn].values
matNan = np.isnan(data)
indRow = np.unique(np.where(matNan)[0])
data[matNan] = temp[matNan]
idOut[indRow] = ntnId
distOut[indRow] = dist[ntnId]
dist = dist.drop(ntnId)
indRow = np.unique(np.where(np.isnan(data))[0])
if len(indRow) == 0:
break
# end of while
distOut[indRow] = np.nan
idOut[indRow] = np.nan
dfP = pd.DataFrame(index=t, columns=varNtn, data=data)
dfP['distNTN'] = distOut
dfP['idNTN'] = idOut
dfP.index.name = 'date'
# read C, Q, F
dfC = usgs.readSample(siteNo, codeLst=varC)
dfQ = usgs.readStreamflow(siteNo)
dfF = gridMET.readBasin(siteNo)
# convert to weekly
td = | pd.date_range(start='1979-01-01', end='2019-12-30', freq='D') | pandas.date_range |
#! -*- coding: utf-8 -*-
import ToolsNLP
import gensim
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import pandas as pd
import re
import site
import os
class TopicModelWrapper:
    '''
    Description::
        Runs a topic model
    :param data:
        Input data
        [[entry_id1, sentence1], [entry_id2, sentence2], ]
    :param config_mw:
        Parameters passed to MecabWrapper
        Default settings
            config_mw = {
                    'dicttype':'ipadic'
                    ,'userdict': ''
                    ,'stopword': ''
                    }
    :param config_tn:
        Parameters passed to MecabWrapper.tokenize
        Default settings
            config_tn = {
                    'pos_filter': [['名詞', '一般', '*']]
                    ,'is_normalized': True
                    ,'is_org': True
                    ,'is_pos': False
                    }
    :param kwargs:
        Topic model settings
        Default settings
            kwargs = {
                    # basic settings
                    'stop_term_toprate': 0.05
                    ,'stop_term_limitcnt': 3
                    ,'topic_doc_threshold': 0.01
                    # corpus settings
                    ,'is_1len': True
                    ,'is_term_fq': True
                    # LDA settings
                    ,'num_topics': 100
                    ,'iterations': 100
                    ,'alpha': 'auto'
                    }
        # basic settings (stop_term_toprate = exclude the top n% most frequent terms
                  ,stop_term_limitcnt = exclude terms that occur n times or fewer
                  ,topic_doc_threshold = score threshold for linking a document to a topic)
        # corpus settings (is_1len = whether to drop single-character alphanumeric tokens left after tokenization
                    ,is_term_fq = whether to apply the term-frequency filter)
        # LDA settings (num_topics = number of topics
                ,iterations = number of iterations
                ,alpha = the alpha hyperparameter)
    Usage::
        >>> import ToolsNLP
        # escaped for doctest. Original code: `data = [i.strip('\n').split('\t') for i in open('data.tsv', 'r')]`
        >>> data = [i.strip('\\n').split('\\t') for i in open('data.tsv', 'r')]
        >>> # data[:1] output:[['http://news.livedoor.com/article/detail/4778030/', '友人代表のスピーチ、独女はどうこなしている? ...]]
        >>>
        >>> t = ToolsNLP.TopicModelWrapper(data=data, config_mw={'dicttype':'neologd'})
        read data ...
        documents count => 5,980
        tokenize text ...
        make corpus ...
        all token count => 24,220
        topic count => 100
        make topic model ...
        make output data ...
        DONE
        >>>
    '''
def __init__(self, data, config_mw={}, config_tn={}, **kwargs):
sitedir = site.getsitepackages()[-1]
installdir = os.path.join(sitedir, 'ToolsNLP')
self._fpath = installdir + '/.fonts/ipaexg.ttf'
        # basic settings
self._stop_term_limitcnt = kwargs.get('stop_term_limitcnt', 3)
self._topic_doc_threshold = kwargs.get('topic_doc_threshold', 0.01)
        # corpus settings
self._is_1len = kwargs.get('is_1len', True)
self._is_term_fq = kwargs.get('is_term_fq', True)
        # LDA settings
self._num_topics = kwargs.get('num_topics', 100)
self._iterations = kwargs.get('iterations', 100)
self._alpha = kwargs.get('alpha', 'auto')
        # morphological analysis settings
self._config_mw = config_mw
self._config_tn = config_tn
if 'pos_filter' not in self._config_tn:
self._config_tn['pos_filter'] = [['名詞', '一般', '*']]
self.m = ToolsNLP.MecabWrapper(**self._config_mw)
print('read data ...')
self._data = data
print('{}{:,}'.format('documents count => ',len(self._data)))
print('tokenize text ...')
self._texts = self.__tokenizer_text()
print('make corpus ...')
self._dictionary, self._corpus, self._texts_cleansing = self.__create_corpus()
print('{}{:,}'.format('topic count => ',self._num_topics))
print('make topic model ...')
self._lda = self.__create_topic_model()
self._lda_corpus = self._lda[self._corpus]
print('make output data ...')
self._topic_list, self._topic_df = self.__count_topic()
self._df_docweight = self.__proc_docweight()
print('DONE')
def __tokenizer_text(self):
return [self.m.tokenize(sentence=doc[1], is_list=True, **self._config_tn) for doc in self._data]
def __stop_term(self, is_1len, is_term_fq):
r = re.compile('^[ぁ-んァ-ン0-9a-zA-Z]$')
count = Counter(w for doc in self._texts for w in doc)
if is_1len and is_term_fq:
return [[w for w in doc if count[w] >= self._stop_term_limitcnt and not re.match(r,w)] for doc in self._texts]
elif is_1len and not is_term_fq:
return [[w for w in doc if count[w] >= self._stop_term_limitcnt] for doc in self._texts]
elif not is_1len and is_term_fq:
return [[w for w in doc if not re.match(r,w)] for doc in self._texts]
else:
return self._texts
def __create_corpus(self):
texts = self.__stop_term(self._is_1len, self._is_term_fq)
dictionary = gensim.corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
return dictionary, corpus, texts
def __create_topic_model(self):
return gensim.models.ldamodel.LdaModel(corpus=self._corpus
,id2word=self._dictionary
,num_topics=self._num_topics
,iterations=self._iterations
,alpha=self._alpha
,dtype=np.float64
)
def __count_topic(self):
topic_counnter = Counter(topic[0] for doc in self._lda_corpus for topic in doc if topic[1] > self._topic_doc_threshold).most_common()
topic_list = list( | pd.DataFrame(topic_counnter) | pandas.DataFrame |
from dateutil import parser
import numpy as np
import pandas as pd
import urllib3
import json
import datetime as dt
import time
import warnings
import math
#######################################################################
# drops invalid data from our history
def dropDirty(history, exWeekends):
history = history[(history.Open != 0)
& (history.High != 0)
& (history.Low != 0)
& (history.Close != 0)]
history = history[(pd.isnull(history.Open) == False)
& (pd.isnull(history.High) == False)
& (pd.isnull(history.Low) == False)
& (pd.isnull(history.Close) == False)]
# we're going to drop any days where the open and high and low and close
# equal one another. these are invalid (closed) days
history = history[((history.Open == history.Close)
& (history.Open == history.High)
& (history.Open == history.Low)) == False]
if exWeekends:
dts = | pd.to_datetime(history.index) | pandas.to_datetime |
#!/usr/bin/env python
import argparse
import pandas as pd
from Bio import SeqIO
import os
import numpy as np
from collections import OrderedDict
from tqdm import tqdm
from abnumber import Chain
import re
import requests
import time
SCORE_REGEX = re.compile('<h3>The Z-score value of the Query sequence is: (-?[0-9.]+)</h3>')
def get_z_score_online(seq):
chain = Chain(seq, scheme='imgt')
chain_type = 'human_heavy' if chain.chain_type == 'H' else ('human_lambda' if chain.chain_type == 'L' else 'human_kappa')
html = None
for retry in range(5):
url = f'http://www.bioinf.org.uk/abs/shab/shab.cgi?aa_sequence={seq}&DB={chain_type}'
request = requests.get(url)
time.sleep(0.5 + retry * 5)
if request.ok:
html = request.text
break
else:
print('Retry', retry+1)
if not html:
raise ValueError('Z-score server is not accessible')
matches = SCORE_REGEX.findall(html)
if not matches:
print(html)
raise ValueError(f'Error calling url {url}')
return float(matches[0])
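# A minimal usage sketch (assumes network access to the bioinf.org.uk Z-score server; the
# sequence below is a truncated placeholder, not a real query):
# z = get_z_score_online('EVQLVESGGGLVQPGGSLRLSCAAS...')
# print('humanness Z-score:', z)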
def get_z_scores_online(queries):
results = []
for query in tqdm(queries):
zscore = get_z_score_online(query.seq)
results.append(OrderedDict(
id=query.id,
description=query.description,
zscore=zscore
))
return | pd.DataFrame(results) | pandas.DataFrame |
# importing all the required libraries
import numpy as np
import pandas as pd
from datetime import datetime
import time, datetime
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from chart_studio.plotly import plotly
import plotly.offline as offline
import plotly.graph_objs as go
offline.init_notebook_mode()
from collections import Counter
import pickle
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import math
from tqdm import tqdm
# Reading all the files
air_visit_data = pd.read_csv('air_visit_data.csv')
air_store_info = pd.read_csv('air_store_info.csv')
air_reserve = pd.read_csv('air_reserve.csv')
hpg_store_info = pd.read_csv('hpg_store_info.csv')
hpg_reserve = pd.read_csv('hpg_reserve.csv')
date_info = pd.read_csv('date_info.csv')
store_id_relation = pd.read_csv('store_id_relation.csv')
sample_submission = pd.read_csv('sample_submission.csv')
# error metric
# kaggle
def root_mean_squared_logarithmic_error(p,a):
err=0
for i in range(len(p)):
err=err+((np.log(p[i]+1)-np.log(a[i]+1))**2)
total_error=(np.sqrt(err/len(p)))
return total_error
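# A vectorized equivalent of the metric above (a sketch added for illustration, not part of
# the original script); it assumes p and a are equal-length array-likes of non-negative values.
def root_mean_squared_logarithmic_error_np(p, a):
    p = np.asarray(p, dtype=float)
    a = np.asarray(a, dtype=float)
    # log1p(x) == log(1 + x), matching the loop-based implementation above
    return float(np.sqrt(np.mean((np.log1p(p) - np.log1p(a)) ** 2)))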
# code taken from,
# https://stackoverflow.com/questions/238260/how-to-calculate-the-bounding-box-for-a-given-lat-lng-location/238558#238558
# by <NAME> (https://stackoverflow.com/users/18770/federico-a-ramponi)
# This snippet takes a latitude/longitude pair, accounts for the spheroidal shape of the Earth
# when converting the half-side distance into angular offsets,
# and returns 4 coordinates which surround the given point as a bounding box.
# degrees to radians
def deg2rad(degrees):
return math.pi*degrees/180.0
# radians to degrees
def rad2deg(radians):
return 180.0*radians/math.pi
# Semi-axes of WGS-84 geoidal reference
WGS84_a = 6378137.0 # Major semiaxis [m]
WGS84_b = 6356752.3 # Minor semiaxis [m]
# Earth radius at a given latitude, according to the WGS-84 ellipsoid [m]
def WGS84EarthRadius(lat):
# http://en.wikipedia.org/wiki/Earth_radius
An = WGS84_a*WGS84_a * math.cos(lat)
Bn = WGS84_b*WGS84_b * math.sin(lat)
Ad = WGS84_a * math.cos(lat)
Bd = WGS84_b * math.sin(lat)
return math.sqrt( (An*An + Bn*Bn)/(Ad*Ad + Bd*Bd) )
# Bounding box surrounding the point at given coordinates,
# assuming local approximation of Earth surface as a sphere
# of radius given by WGS84
def boundingBox(latitudeInDegrees, longitudeInDegrees, halfSideInKm):
lat = deg2rad(latitudeInDegrees)
lon = deg2rad(longitudeInDegrees)
halfSide = 1000*halfSideInKm
# Radius of Earth at given latitude
radius = WGS84EarthRadius(lat)
# Radius of the parallel at given latitude
pradius = radius*math.cos(lat)
latMin = lat - halfSide/radius
latMax = lat + halfSide/radius
lonMin = lon - halfSide/pradius
lonMax = lon + halfSide/pradius
return (rad2deg(latMin), rad2deg(lonMin), rad2deg(latMax), rad2deg(lonMax))
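# A minimal usage sketch of boundingBox above: an approximately 1.5 km half-side box around
# an arbitrary example coordinate (degrees).
# lat_min, lon_min, lat_max, lon_max = boundingBox(35.6895, 139.6917, 1.5)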
def final_fun_2(air_visit_data, air_store_info, hpg_store_info, date_info, store_id_relation):
bounding_box_lat=[]
bounding_box_lon=[]
for i in range(len(air_store_info)):
bounding_box_lat.append(air_store_info['latitude'][i])
bounding_box_lon.append(air_store_info['longitude'][i])
neighbour=[]
lat_1=[]
lon_1=[]
lat_2=[]
lon_2=[]
for i in range(len(air_store_info)):
lat1, lon1, lat2, lon2=boundingBox(bounding_box_lat[i],bounding_box_lon[i],1.5)
lat_1.append(lat1)
lon_1.append(lon1)
lat_2.append(lat2)
lon_2.append(lon2)
for i in range(len(air_store_info)):
count=0
for j in range(len(air_store_info)):
if bounding_box_lat[j]>lat_1[i] and bounding_box_lat[j]<lat_2[i] and bounding_box_lon[j]>lon_1[i] and bounding_box_lon[j]<lon_2[i]:
count=count+1
neighbour.append(count-1)
air_store_info['nearest_neighbour']=neighbour
air_store_info=air_store_info.rename(columns={"air_genre_name":"genre_name","air_area_name":"area_name"})
hpg_store_info=hpg_store_info.rename(columns={"hpg_genre_name":"genre_name","hpg_area_name":"area_name"})
date_info=date_info.rename(columns={"calendar_date":"visit_date"})
total_data= | pd.merge(air_visit_data,date_info,how='left',on=['visit_date']) | pandas.merge |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import streamlit as st
from scipy import stats
from fairlearn.metrics import MetricFrame
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# Functions
def compute_t_test(df, sensitive_feature):
indexes = list(set(df.index.get_level_values(0)))
significant_features = []
for feature in df.columns:
t_val, p_val = stats.ttest_ind(
df.loc[indexes[0]][feature].values,
df.loc[indexes[1]][feature].values
)
if p_val < 0.05:
significant_features.append(feature)
return significant_features
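# A minimal sketch of the input compute_t_test expects (hypothetical data): a frame whose
# first index level holds exactly two sensitive-feature groups and whose columns are the
# numeric features to compare between those groups.
# demo = pd.DataFrame(
#     {'income': [10, 12, 11, 30, 31, 29], 'age': [40, 41, 39, 40, 42, 41]},
#     index=pd.MultiIndex.from_product([['groupA', 'groupB'], [0, 1, 2]]),
# )
# compute_t_test(demo, 'sex')  # returns the columns whose group difference has p < 0.05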
def highlight_col(x):
r = 'background-color: red'
df1 = | pd.DataFrame('', index=x.index, columns=x.columns) | pandas.DataFrame |
from matplotlib import pyplot as plt
import csv
from absl import app, flags, logging
from absl.flags import FLAGS
import os
import scipy.io
import numpy as np
import cv2
import tqdm
from sklearn.metrics import average_precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
import pandas as pd
import seaborn as sns
import datetime
import glob
import re
import string
import sys
import cv2
import re
import ast
import shutil
import random
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import confusion_matrix
import copy
import collections
def _check_ext(path, default_ext):
name, ext = os.path.splitext(path)
if ext == '':
if default_ext[0] == '.':
default_ext = default_ext[1:]
path = name + '.' + default_ext
return path
def save_yaml(path, data, **kwargs):
import oyaml as yaml
path = _check_ext(path, 'yml')
with open(path, 'w') as f:
yaml.dump(data, f, **kwargs)
def convert_categorical_str_to_numerical(category_list):
"""
Takes a category list of strings and converts it to integers, e.g:
category_list = [dog, cat, horse, dog, cow]
return: [0, 1, 2, 0, 3]
:param category_list: (list) list of string categories
:return: (list)
"""
unique = list(np.unique(category_list))
return [unique.index(u) for u in category_list]
def match_pair_of_data(data_file_1, data_file_2):
"""
matches pairs of data from two csv files
:param data_file_1: (str) CSV file absolute path
:param data_file_2: (str) CSV file absolute path
:return: (list, list) list of numerical values for a list of inputs that matches name
"""
y_test = []
y_pred = []
data_file1 = pd.read_csv(data_file_1)
data_file2 = pd.read_csv(data_file_2)
gt_categories = convert_categorical_str_to_numerical(data_file2['tissue type'].tolist())
gt_file_names = data_file2['image_name'].tolist()
predict_fnames = data_file1['fname'].tolist()
predict_categories = data_file1['class_2'].tolist()
print(f'found {len(gt_file_names)} cases in file 1 and {len(predict_fnames)} cases in file 2')
for i, name in enumerate(predict_fnames):
if name in gt_file_names:
y_pred.append(float(predict_categories[i]))
y_test.append(float(gt_categories[gt_file_names.index(name)]))
print(f'{len(y_test)} cases matched names')
return y_test, y_pred
def calculate_auc_and_roc(predicted, real, case_name, plot=True, results_directory='',
results_id='', save_plot=False):
"""
:param predicted:
:param real:
:param case_name:
:param plot:
:param results_directory:
:param results_id:
:param save_plot:
:return:
"""
y_test, y_pred = match_pair_of_data(predicted, real)
fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, y_pred)
auc_keras = auc(fpr_keras, tpr_keras)
plt.figure()
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras, label=case_name + '(area = {:.3f})'.format(auc_keras))
# plt.plot(fpr_rf, tpr_rf, label='RF (area = {:.3f})'.format(auc_rf))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
if save_plot is True:
name_fig = ''.join(['roc_', results_id, '_.png'])
plt.savefig(results_directory + name_fig)
if plot is True:
plt.show()
plt.close()
return auc_keras
def check_file_isvid(filename):
"""
checks if a file has a video extension, accepted files are: '.mp4', '.mpg', '.avi'
:param filename: (str) name of the file
:return: (bool)
"""
list_extensions = ['.mpg', '.MPG', '.mp4', '.MP4', '.AVI', '.avi']
if filename[-4:] in list_extensions:
return True
else:
return False
def get_video_files_in_dir(dir_dataset):
"""
    Given a directory, checks if there are any video files and returns their absolute paths in a list
:param dir_dataset: (str) directory
:return: (list) list of video files
"""
initial_list_files = os.listdir(dir_dataset)
list_folders = []
list_video_files = []
for file_name in initial_list_files:
if os.path.isdir(dir_dataset + file_name):
list_folders.append(file_name)
else:
if file_name[-4:] not in list_video_files:
list_video_files.append(dir_dataset + file_name)
for folder in list_folders:
list_files = os.listdir(dir_dataset + folder)
for file_name in list_files:
if file_name[-4:] not in list_video_files:
list_video_files.append(''.join([dir_dataset, folder, '/', file_name]))
return list_video_files
def analyze_video_dataset(dir_dataset):
"""
    Analyzes a dataset of videos, showing the number of frames of each video
:param dir_dataset: (str) directory of the dataset
:return:
"""
list_video_files = get_video_files_in_dir(dir_dataset)
print(f"found {len(list_video_files)} video files")
num_frames = []
name_videos = []
for path_to_video in list_video_files:
cap = cv2.VideoCapture(path_to_video)
name_videos.append(path_to_video.replace(dir_dataset, ''))
num_frames.append(cap.get(cv2.CAP_PROP_FRAME_COUNT))
df = pd.DataFrame(data={"name file": name_videos, "num frames": num_frames})
def find_pattern_names(string_name, str_pattern):
"""
Looks for a pattern name in a string and returns the number after it
:param string_name: the string where to look for a pattern
:param str_pattern: the pattern that needs to be found
:return:
"""
match = re.search(str_pattern + '(\d+)', string_name)
if match:
return match.group(1)
else:
return np.nan
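# A minimal sketch of the helper above (hypothetical file names):
# find_pattern_names('urs_case23_frame001.png', 'case')  # -> '23'
# find_pattern_names('urs_frame001.png', 'case')         # -> nan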
def determine_type_procedure(file_name):
"""
    Determine the type of procedure according to the name of the file
:param file_name:
:return:
"""
types_procedures = ['cys', 'urs']
for kind in types_procedures:
if kind in file_name:
return kind
def analyze_dataset_patterns(dataset_dir, pattern_str):
"""
Analyze a dataset to find a patter after a string
:param dataset_dir:
:param pattern_str:
:return:
"""
list_files = os.listdir(dataset_dir)
unique_names = []
for file_name in list_files:
pattern = find_pattern_names(file_name, pattern_str)
type_procedure = determine_type_procedure(file_name)
combination = [type_procedure, pattern]
if combination not in unique_names:
unique_names.append(combination)
return unique_names
def read_mask(dir_image):
"""
:param dir_image:
:return:
"""
original_img = cv2.imread(dir_image)
if original_img is None:
print('Could not open or find the image:', dir_image)
exit(0)
height, width, depth = original_img.shape
img = cv2.resize(original_img, (256, 256))
img = img / 255
img = (img > 0.9) * 1.0
return img
def read_img_results(dir_image):
"""
:param dir_image:
:return:
"""
original_img = cv2.imread(dir_image)
if original_img is None:
print('Could not open or find the image:', dir_image)
exit(0)
height, width, depth = original_img.shape
img = cv2.resize(original_img, (256, 256))
return img
def compare_box_plots(general_directory = '', name_test_csv_file='', name_validation_csv_file='',
save_directory='', condition_name=''):
"""
:param general_directory:
:param name_test_csv_file:
:param name_validation_csv_file:
:param save_directory:
:param condition_name:
:return:
2DO: Handle list of dirs and exclusion conditions
"""
predictions_path = ''.join([general_directory, 'predictions'])
prediction_folders = sorted([f for f in os.listdir(predictions_path)])
file_names = []
dsc_values = {}
prec_values = {}
rec_values = {}
acc_values = {}
if general_directory != '' and type(general_directory) == str:
csv_files = sorted([f for f in os.listdir(general_directory) if 'evaluation_results' in f and f.endswith('.csv')])
print(csv_files)
count_id = 0
for i, folder in enumerate(prediction_folders):
if folder in csv_files[i]:
file_names.append(folder)
else:
file_names.append('dataset_'+str(count_id))
                count_id += 1
data_file = pd.read_csv(general_directory + csv_files[i])
dsc_values[file_names[i]] = data_file['DSC'].tolist()
prec_values[file_names[i]] = data_file['Precision'].tolist()
rec_values[file_names[i]] = data_file['Recall'].tolist()
acc_values[file_names[i]] = data_file['Accuracy'].tolist()
else:
pass
dsc_data = pd.DataFrame.from_dict(dsc_values, orient='index').T
prec_data = pd.DataFrame.from_dict(prec_values, orient='index').T
rec_data = pd.DataFrame.from_dict(rec_values, orient='index').T
acc_data = pd.DataFrame.from_dict(acc_values, orient='index').T
# build the image to plot
fig1 = plt.figure(1, figsize=(11,7))
ax1 = fig1.add_subplot(221)
ax1 = sns.boxplot(data=dsc_data)
ax1 = sns.swarmplot(data=dsc_data, color=".25")
ax1.set_ylim([0, 1.0])
ax1.title.set_text('$DSC$')
ax2 = fig1.add_subplot(222)
ax2 = sns.boxplot(data=prec_data)
ax2 = sns.swarmplot(data=prec_data, color=".25")
    ax2.set_ylim([0, 1.0])
ax2.title.set_text('$PREC$')
ax3 = fig1.add_subplot(223)
ax3 = sns.boxplot(data=rec_data)
ax3 = sns.swarmplot(data=rec_data, color=".25")
    ax3.set_ylim([0, 1.0])
ax3.title.set_text('$REC$')
ax4 = fig1.add_subplot(224)
ax4 = sns.boxplot(data=acc_data)
ax4 = sns.swarmplot(data=acc_data, color=".25")
    ax4.set_ylim([0, 1.0])
ax4.title.set_text('$ACC$')
plt.show()
if save_directory == '':
save_directory = general_directory
date_analysis = datetime.datetime.now()
# ID name for the plot results
plot_save_name = ''.join([save_directory + 'boxplots_results_',
date_analysis.strftime("%d_%m_%Y_%H_%M"),
'_.png'])
plt.savefig(plot_save_name)
plt.close()
text_file_name = ''.join([save_directory + 'data_used_boxplots',
date_analysis.strftime("%d_%m_%Y_%H_%M"),
'_.txt'])
textfile = open(text_file_name, "w")
np.savetxt(textfile, csv_files, delimiter="\n", fmt="%s")
textfile.close()
def get_confusion_matrix_intersection_mats(groundtruth, predicted):
""" Returns dict of 4 boolean numpy arrays with True at TP, FP, FN, TN
"""
confusion_matrix_arrs = {}
groundtruth_inverse = np.logical_not(groundtruth)
predicted_inverse = np.logical_not(predicted)
confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)
confusion_matrix_arrs['tn'] = np.logical_and(groundtruth_inverse, predicted_inverse)
confusion_matrix_arrs['fp'] = np.logical_and(groundtruth_inverse, predicted)
confusion_matrix_arrs['fn'] = np.logical_and(groundtruth, predicted_inverse)
return confusion_matrix_arrs
def get_confusion_matrix_overlaid_mask(image, groundtruth, predicted, alpha, colors):
"""
Returns overlay the 'image' with a color mask where TP, FP, FN, TN are
each a color given by the 'colors' dictionary
"""
# image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
masks = get_confusion_matrix_intersection_mats(groundtruth, predicted)
color_mask = np.zeros_like(image, dtype=np.float32)
for label, mask in masks.items():
color = colors[label]
mask_rgb = np.zeros_like(image, dtype=np.float32)
# mask_rgb = mask_rgb.astype(int)
size_x, size_y, channels = np.shape(mask)
plt.figure()
plt.title(label)
plt.imshow(mask.astype(np.float32))
for x_index in range(size_x):
for y_index in range(size_y):
if mask[
x_index, y_index, 0] != 0: # and mask[x_index, y_index, 1] == 0 and mask[x_index, y_index, 2] == 0:
mask_rgb[x_index, y_index, :] = color
# print(mask_rgb[x_index, y_index, :])
color_mask += mask_rgb
plt.close()
"""for label, mask in masks.items():
color = colors[label]
mask_rgb = np.zeros_like(image)
mask_rgb[mask != 0] = color
color_mask += mask_rgb
return cv2.addWeighted(image, alpha, color_mask, 1 - alpha, 0)"""
return color_mask.astype(np.float32) # cv2.addWeighted(image, 0.1, color_mask, 0.5, 0)
def compare_results_overlay(experiment_id = '', base_directory= '', dir_predictions='', selected_data = 'test',
dir_groundtruth='', dir_csv_file='', save_directory=''):
"""
:param experiment_id:
:param base_directory:
:param dir_predictions:
:param selected_data:
:param dir_groundtruth:
:param dir_csv_file:
:param save_directory:
:return:
"""
if experiment_id != '':
if base_directory !='':
directory_experiment = ''.join([base_directory, 'results/', experiment_id])
list_predicted_dataset = [folder for folder in os.listdir(directory_experiment + '/predictions/') if selected_data in folder]
csv_file_name = [f for f in os.listdir(directory_experiment) if f.endswith('.csv') and 'evaluation_results_' in f][0]
dir_predictions = ''.join([base_directory, 'results/', experiment_id, '/predictions/', list_predicted_dataset[0], '/'])
path_images_folder = ''.join([base_directory, 'dataset/', list_predicted_dataset[0], '/images/'])
path_masks_folder = ''.join([base_directory, 'dataset/', list_predicted_dataset[0], '/masks/'])
data_results = pd.read_csv(''.join([directory_experiment, '/', csv_file_name]))
else:
sys.exit("base_directory needed")
else:
path_images_folder = dir_groundtruth + 'images/'
path_masks_folder = dir_groundtruth + 'masks/'
data_results = pd.read_csv(dir_csv_file)
list_dice_values = data_results['DSC'].tolist()
list_imgs_csv = data_results['Image'].tolist()
if save_directory == '':
save_directory = ''.join([directory_experiment, '/overlay_results/'])
if not(os.path.isdir(save_directory)):
os.mkdir(save_directory)
image_list = [f for f in os.listdir(path_images_folder) if os.path.isfile(os.path.join(path_images_folder, f))]
mask_list = [f for f in os.listdir(path_masks_folder) if os.path.isfile(os.path.join(path_masks_folder, f))]
predicted_masks = [f for f in os.listdir(dir_predictions) if os.path.isfile(os.path.join(dir_predictions, f))]
for image in predicted_masks[:]:
if image in mask_list:
path_image = ''.join([path_images_folder, image])
path_mask = ''.join([path_masks_folder, image])
path_predicted = ''.join([dir_predictions, image])
image_frame = read_img_results(path_image)
mask_image = read_mask(path_mask)
for counter, element in enumerate(list_imgs_csv):
print(element)
if image == element:
dice_value = float(list_dice_values[counter])
predicted_mask = read_mask(path_predicted)
dice_value = float("{:.3f}".format(dice_value))
alpha = 0.5
confusion_matrix_colors = {
'tp': (50, 100, 0), # cyan
'fp': (125, 0, 125), # magenta
'fn': (0, 100, 50), # blue
'tn': (0, 0, 0) # black
}
overlay = get_confusion_matrix_overlaid_mask(image_frame, mask_image, predicted_mask, alpha,
confusion_matrix_colors)
my_dpi = 96
                # Use the one below
#fig = plt.figure()
#ax1 = fig1.add_subplot(131)
plt.figure(3, figsize=(640 / my_dpi, 480 / my_dpi), dpi=my_dpi)
plt.subplot(141)
title = 'DSC: ' + str(dice_value)
plt.title(title)
plt.imshow(image_frame)
plt.axis('off')
plt.subplot(142)
plt.title('Mask')
plt.imshow(mask_image)
plt.axis('off')
plt.subplot(143)
plt.title('Predicted')
plt.imshow(predicted_mask)
plt.axis('off')
plt.subplot(144)
plt.title('Overlay')
plt.imshow(overlay)
plt.axis('off')
plt.savefig(''.join([save_directory, image]))
plt.close()
def dice(im1, im2, empty_score=1.0):
"""
Computes the Dice coefficient, a measure of set similarity.
Parameters
----------
im1 : array-like, bool
Any array of arbitrary size. If not boolean, will be converted.
im2 : array-like, bool
Any other array of identical size. If not boolean, will be converted.
Returns
-------
dice : float
Dice coefficient as a float on range [0,1].
Maximum similarity = 1
No similarity = 0
Both are empty (sum eq to zero) = empty_score
Notes
-----
The order of inputs for `dice` is irrelevant. The result will be
identical if `im1` and `im2` are switched.
"""
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
im_sum = im1.sum() + im2.sum()
if im_sum == 0:
return empty_score
# Compute Dice coefficient
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / im_sum
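# A minimal sketch of the Dice metric above on two tiny made-up binary masks:
# m1 = np.array([[1, 1], [0, 0]])
# m2 = np.array([[1, 0], [0, 0]])
# dice(m1, m2)  # -> 2 * 1 / (2 + 1) = 0.666...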
def calculate_rates(image_1, image_2):
"""
Takes two black and white images and calculates recall, precision,
average precision and accuracy
:param image_1: array
:param image_2: array
:return: list with the values of precision, recall, average precision and accuracy
"""
    image_1 = np.asarray(image_1).astype(bool)
    image_2 = np.asarray(image_2).astype(bool)
image_1 = image_1.flatten()
image_2 = image_2.flatten()
if image_1.shape != image_2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
accuracy_value = accuracy_score(image_1, image_2)
if (np.unique(image_1) == [False]).all() and (np.unique(image_1) == [False]).all():
recall_value = 1.
precision_value = 1.
average_precision = 1.
else:
recall_value = recall_score(image_1, image_2)
precision_value = precision_score(image_1, image_2)
average_precision = average_precision_score(image_1, image_2)
return precision_value, recall_value, average_precision, accuracy_value
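# A minimal sketch of calculate_rates; when both masks are entirely background the
# special-case branch above returns perfect scores:
# calculate_rates(np.zeros((2, 2)), np.zeros((2, 2)))  # -> (1.0, 1.0, 1.0, 1.0)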
def calculate_performance(dir_results, dir_groundtruth):
"""
Calculate the performance metrics given two directories with results dataset and ground truth dataset
The performance metrics calculated are: Dice Coefficient, Precision, Recall, Average Precision and
Accuracy.
:param dir_results: Directory of the results images
:param dir_groundtruth: Directory of the ground truth images
:return: Pandas Dataframe with the image name and the metrics
"""
imgs_name = []
dice_values = []
precision_values = []
recall_values = []
avg_precision_values = []
accuracy_values = []
img_list_results = sorted([file for file in os.listdir(dir_results) if file.endswith('.png')])
img_groundtruth = sorted([file for file in os.listdir(dir_groundtruth) if file.endswith('.png')])
for i, image in enumerate(tqdm.tqdm(img_list_results, desc=f'Analyzing {len(img_list_results)} images')):
if image in img_groundtruth:
img1 = read_img_results(''.join([dir_results, '/', image]))
img2 = read_img_results(dir_groundtruth + image)
imgs_name.append(image)
dice_values.append(dice(img1, img2))
precision, recall, average_precision, accuracy = calculate_rates(img1, img2)
precision_values.append(precision)
recall_values.append(recall)
avg_precision_values.append(average_precision)
accuracy_values.append(accuracy)
data_results = pd.DataFrame(np.array([imgs_name, dice_values, precision_values, recall_values,
avg_precision_values, accuracy_values]).T,
columns=['Image', 'DSC', 'Precision', 'Recall', 'Avg. Precision', 'Accuracy'])
return data_results
def analyze_performances(project_dir, exclude=[]):
"""
Analyze the performance of a directory or a list of directories
:param project_dir:
:param exclude:
:return:
"""
# 2DO: recognize directory or list
if type(project_dir) == str:
results_list = sorted(os.listdir(project_dir + 'results/'))
results_list.remove('temp')
results_list.remove('analysis')
        if exclude:
            for exclusion_case in exclude:
                results_list = [exp for exp in results_list if exclusion_case not in exp]
for experiment_id in results_list:
print(experiment_id)
folders_prediction = os.listdir(os.path.join(project_dir, 'results', experiment_id, 'predictions'))
for folder in folders_prediction:
dir_results = os.path.join(project_dir, 'results', experiment_id, 'predictions', folder)
dir_gt = os.path.join(project_dir, 'dataset', folder, 'masks/')
if os.path.isdir(dir_gt):
results_data = calculate_performance(dir_results, dir_gt)
name_results_file = ''.join([project_dir, 'results/', experiment_id, '/',
'evaluation_results_', folder, '_',experiment_id, '_.csv'])
results_data.to_csv(name_results_file)
print(f"results saved at: {name_results_file}")
else:
print(f' folder: {dir_gt} not found')
def save_boxplots(project_dir):
"""
    Given a folder with results, saves the boxplots of the datasets where inferences were made.
    The directory must contain a "predictions" folder and the csv files with the predictions
    for each of the folders.
:param project_dir: (str) directory to analyze
:return:
"""
if type(project_dir) == str:
compare_box_plots(project_dir)
#olders_prediction = os.listdir(os.path.join(project_dir, 'results', experiment_id, 'predictions'))
elif type(project_dir) == list:
results_list = os.listdir(project_dir + 'results/')
results_list.remove('temp')
for experiment_id in results_list:
folders_prediction = os.listdir(os.path.join(project_dir, 'results', experiment_id, 'predictions'))
for folder in folders_prediction:
dir_results = os.path.join(project_dir, 'results', experiment_id, 'predictions', folder)
dir_gt = os.path.join(project_dir, 'dataset', folder, 'masks/')
if os.path.isdir(dir_gt):
results_data = calculate_performance(dir_results, dir_gt)
name_results_file = ''.join([project_dir, 'results/', experiment_id, '/',
'evaluation_results_', folder, '_',experiment_id, '_.csv'])
results_data.to_csv(name_results_file)
print(f"results saved at: {name_results_file}")
else:
print(f' folder: {dir_gt} not found')
else:
print('type(project dir) not understood')
def extract_information_from_name(string_name):
"""
:param string_name:
:return:
"""
model_name = re.search('evaluation_results_test_0_(.+?)_lr_', string_name).group(1)
date_experiment = re.search('_rgb_(.+?)_.csv', string_name).group(1)
lr = re.search('lr_(.+?)_', string_name).group(1)
bs = re.search('bs_(.+?)_', string_name).group(1)
return lr, bs, model_name, date_experiment
def plot_training_history(list_csv_files, save_dir=''):
"""
Plots the training history of a model given the list of csv files (in case that there are different training stages)
Parameters
----------
list_csv_files (list): list of the csv files with the training history
save_dir (str): The directory where to save the file, if empty, the current working directory
Returns
-------
"""
if len(list_csv_files) > 1:
print(list_csv_files[0])
fine_tune_history = pd.read_csv(list_csv_files[0])
fine_tune_lim = fine_tune_history['epoch'].tolist()[-1]
header_1 = fine_tune_history.columns.values.tolist()
train_history = pd.read_csv(list_csv_files[-1])
header_2 = train_history.columns.values.tolist()
# mix the headers in case they are different among files
dictionary = {header_2[i]:name for i, name in enumerate(header_1)}
train_history.rename(columns=dictionary, inplace=True)
# append the dataframes in a single one
train_history = fine_tune_history.append(train_history, ignore_index=True)
else:
fine_tune_lim = 0
train_history = pd.read_csv(list_csv_files[0])
fig = plt.figure(1, figsize=(12, 9))
ax1 = fig.add_subplot(221)
ax1.title.set_text('$ACC$')
ax1.plot(train_history['accuracy'].tolist(), label='train')
ax1.plot(train_history['val_accuracy'].tolist(), label='val')
ax1.fill_between((0, fine_tune_lim), 0, 1, facecolor='orange', alpha=0.4)
plt.legend(loc='best')
ax2 = fig.add_subplot(222)
ax2.title.set_text('PREC')
ax2.plot(train_history['precision'].tolist(), label='train')
ax2.plot(train_history['val_precision'].tolist(), label='val')
ax2.fill_between((0, fine_tune_lim), 0, 1, facecolor='orange', alpha=0.4)
plt.legend(loc='best')
ax3 = fig.add_subplot(223)
ax3.title.set_text('$LOSS$')
ax3.plot(train_history['loss'].tolist(), label='train')
ax3.plot(train_history['val_loss'].tolist(), label='val')
max_xval = np.amax([train_history['loss'].tolist(), train_history['val_loss'].tolist()])
ax3.fill_between((0, fine_tune_lim), 0, max_xval, facecolor='orange', alpha=0.4)
plt.legend(loc='best')
ax4 = fig.add_subplot(224)
ax4.title.set_text('$REC$')
ax4.plot(train_history['recall'].tolist(), label='train')
ax4.plot(train_history['val_recall'].tolist(), label='val')
ax4.fill_between((0, fine_tune_lim), 0, 1, facecolor='orange', alpha=0.4)
plt.legend(loc='best')
if save_dir == '':
dir_save_figure = os.getcwd() + '/training_history.png'
else:
dir_save_figure = save_dir + 'training_history.png'
print(f'figure saved at: {dir_save_figure}')
plt.savefig(dir_save_figure)
plt.close()
def analyze_dataset_distribution(dataset_dir, plot_figure=False, dir_save_fig=''):
    os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH", None)
list_cases = sorted([f for f in os.listdir(dataset_dir) if os.path.isdir(dataset_dir + f)])
cases_ocurence = {'CIS WLI': 0, 'CIS NBI': 0, 'HGC WLI': 0, 'HGC NBI': 0, 'HLT WLI': 0, 'HLT NBI': 0,
'LGC WLI': 0, 'LGC NBI': 0, 'NTL WLI': 0, 'NTL NBI': 0}
class_cases_dict = {case: copy.copy(cases_ocurence) for case in list_cases}
unique_combinations = list()
total_imgs = list()
for case in list_cases[:]:
combinations = list()
csv_file = [f for f in os.listdir(dataset_dir + case) if f.endswith('.csv')].pop()
csv_file_dir = os.path.join(dataset_dir, case, csv_file)
df = pd.read_csv(csv_file_dir)
list_tissue_types = df['tissue type'].tolist()
list_imaging_type = df['imaging type'].tolist()
# obtain all the unique combinations in the different cases
for i, tissue in enumerate(list_tissue_types):
combination = ''.join([tissue, ' ', list_imaging_type[i]])
combinations.append(combination)
if combination not in unique_combinations:
unique_combinations.append(combination)
total_imgs.append(len(combinations))
for combination in np.unique(combinations):
class_cases_dict[case][combination] = combinations.count(combination)
# create an empty array
plot_array = np.zeros([len(list_cases), len(unique_combinations)])
normalized_array = copy.copy(plot_array)
    # Now let's fill the array with the occurrence of each class for each patient case
for i, case in enumerate(list_cases[:]):
for j, key in enumerate(class_cases_dict[case].keys()):
plot_array[i][j] = class_cases_dict[case][key]
normalized_array[i][j] = class_cases_dict[case][key]/total_imgs[i]
xticklabels = list(class_cases_dict[case].keys())
print(cases_ocurence)
plt.figure()
labels = np.asarray(plot_array).reshape(len(list_cases), len(unique_combinations))
sns.heatmap(normalized_array, cmap='YlOrBr', cbar=False, linewidths=.5,
yticklabels=list_cases, xticklabels=xticklabels, annot=labels)
plt.xlabel('Classes')
plt.ylabel('Cases')
plt.show()
def compute_confusion_matrix(gt_data, predicted_data, plot_figure=False, dir_save_fig=''):
"""
Compute the confusion Matrix given the ground-truth data (gt_data) and predicted data (predicted_data)
in list format. If Plot is True shows the matrix .
Parameters
----------
gt_data : list
predicted_data : list
plot_figure :
dir_save_fig :
Returns
-------
"""
uniques_predicts = np.unique(predicted_data)
uniques_gt = np.unique(gt_data)
if collections.Counter(uniques_gt) == collections.Counter(uniques_predicts):
uniques = uniques_gt
else:
uniques = np.unique([*uniques_gt, *uniques_predicts])
ocurrences = [gt_data.count(unique) for unique in uniques]
conf_matrix = confusion_matrix(gt_data, predicted_data)
group_percentages = [conf_matrix[i]/ocurrences[i] for i, row in enumerate(conf_matrix)]
size = len(list(uniques))
list_uniques = list(uniques)
xlabel_names = list()
for name in list_uniques:
# if the name of the unique names is longer than 4 characters will split it
if len(name) > 4:
name_split = name.split('-')
new_name = ''
for splits in name_split:
                new_name += splits[0]
xlabel_names.append(new_name)
else:
xlabel_names.append(name)
labels = np.asarray(group_percentages).reshape(size, size)
sns.heatmap(group_percentages, cmap='Blues', cbar=False, linewidths=.5,
yticklabels=list(uniques), xticklabels=list(xlabel_names), annot=labels)
plt.xlabel('Predicted Class')
plt.ylabel('Real Class')
if plot_figure is True:
plt.show()
if dir_save_fig == '':
dir_save_figure = os.getcwd() + '/confusion_matrix.png'
else:
if not dir_save_fig.endswith('.png'):
dir_save_figure = dir_save_fig + 'confusion_matrix.png'
else:
dir_save_figure = dir_save_fig
print(f'figure saved at: {dir_save_figure}')
plt.savefig(dir_save_figure)
plt.close()
return conf_matrix
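# A minimal sketch of the confusion-matrix helper above (made-up tissue labels and a
# hypothetical output file name):
# gt = ['HLT', 'HLT', 'CIS', 'LGC']
# preds = ['HLT', 'CIS', 'CIS', 'LGC']
# cm = compute_confusion_matrix(gt, preds, plot_figure=False, dir_save_fig='cm_demo.png')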
def analyze_multiclass_experiment(gt_data_file, predictions_data_dir, plot_figure=False, dir_save_figs=None,
analyze_training_history=False):
"""
Analyze the results of a multi-class classification experiment
Parameters
----------
gt_data_file :
predictions_data_dir :
plot_figure :
    dir_save_figs :
    analyze_training_history :
Returns
-------
History plot, Confusion Matrix
"""
wli_imgs = []
nbi_imgs = []
predictions_nbi = []
predictions_wli = []
wli_tissue_types = []
nbi_tissue_types = []
list_prediction_files = [f for f in os.listdir(predictions_data_dir) if 'predictions' in f and '(_pre' not in f]
file_predictiosn = list_prediction_files.pop()
path_file_predictions = predictions_data_dir + file_predictiosn
print(f'file predictions found: {file_predictiosn}')
df_ground_truth = pd.read_csv(gt_data_file)
df_preditc_data = pd.read_csv(path_file_predictions)
predictions_names = df_preditc_data['fname'].tolist()
predictions_vals = df_preditc_data['over all'].tolist()
gt_names = df_ground_truth['image_name'].tolist()
gt_vals = df_ground_truth['tissue type'].tolist()
imaging_type = df_ground_truth['imaging type'].tolist()
existing_gt_vals = list()
ordered_predictiosn = list()
for name in predictions_names:
if name in gt_names:
index = predictions_names.index(name)
ordered_predictiosn.append(predictions_vals[index])
index_gt = gt_names.index(name)
existing_gt_vals.append(gt_vals[index_gt])
if imaging_type[index_gt] == 'NBI':
nbi_imgs.append(name)
predictions_nbi.append(predictions_vals[index])
nbi_tissue_types.append(gt_vals[index_gt])
if imaging_type[index_gt] == 'WLI':
wli_imgs.append(name)
predictions_wli.append(predictions_vals[index])
wli_tissue_types.append(gt_vals[index_gt])
    # dir to save the figures
if dir_save_figs:
dir_save_fig = dir_save_figs
else:
dir_save_fig = predictions_data_dir
data_yaml = {'Accuracy ALL ': float(accuracy_score(existing_gt_vals, ordered_predictiosn)),
'Accuracy WLI ': float(accuracy_score(wli_tissue_types, predictions_wli)),
'Accuracy NBI ': float(accuracy_score(nbi_tissue_types, predictions_nbi))
}
# ACCURACY
print('Accuracy ALL: ', accuracy_score(existing_gt_vals, ordered_predictiosn))
print('Accuracy WLI: ', accuracy_score(wli_tissue_types, predictions_wli))
print('Accuracy NBI: ', accuracy_score(nbi_tissue_types, predictions_nbi))
# Precision
print('Precision ALL: ', precision_score(existing_gt_vals, ordered_predictiosn, average=None))
print('Precision WLI: ', precision_score(wli_tissue_types, predictions_wli, average=None))
print('Precision NBI: ', precision_score(nbi_tissue_types, predictions_nbi, average=None, zero_division=1))
# Recall
print('Recall ALL: ', recall_score(existing_gt_vals, ordered_predictiosn, average=None))
print('Recall WLI: ', recall_score(wli_tissue_types, predictions_wli, average=None))
print('Recall NBI: ', recall_score(nbi_tissue_types, predictions_nbi, average=None, zero_division=1))
# Confusion Matrices
compute_confusion_matrix(existing_gt_vals, ordered_predictiosn, plot_figure=False,
dir_save_fig=dir_save_fig + 'confusion_matrix_all.png')
compute_confusion_matrix(wli_tissue_types, predictions_wli,
dir_save_fig=dir_save_fig + 'confusion_matrix_wli.png')
compute_confusion_matrix(nbi_tissue_types, predictions_nbi,
dir_save_fig=dir_save_fig + 'confusion_matrix_nbi.png')
dir_data_yaml = dir_save_fig + 'performance_analysis.yaml'
save_yaml(dir_data_yaml, data_yaml)
gt_values = []
for name in predictions_names:
if name in gt_names:
index = gt_names.index(name)
gt_values.append(gt_vals[index])
new_df = df_preditc_data.copy()
data_top = list(new_df.columns)
new_df.insert(len(data_top), "real values", gt_values, allow_duplicates=True)
name_data_save = path_file_predictions
new_df.to_csv(name_data_save, index=False)
print(f'results saved at {name_data_save}')
# analyze the history
if analyze_training_history is True:
list_history_files = [f for f in os.listdir(predictions_data_dir) if 'train_history' in f]
ordered_history = list()
fine_tune_file = [f for f in list_history_files if 'fine_tune' in f]
if fine_tune_file:
fine_tune_file_dir = predictions_data_dir + fine_tune_file.pop()
ordered_history.append(fine_tune_file_dir)
ordered_history.append(predictions_data_dir + list_history_files[-1])
plot_training_history(ordered_history, save_dir=dir_save_fig)
def compare_experiments(dir_folder_experiments, selection_criteria=['evaluation_results_test_0'], dir_save_results='',
exclude=[], top_results=1.0):
"""
    Compare the DSC, Prec, Rec and ACC of different experiments and save the boxplot comparison
    :param dir_folder_experiments:
    :param selection_criteria:
    :param dir_save_results:
    :param exclude:
    :param top_results:
:return:
"""
date_analysis = datetime.datetime.now()
names_analyzed_files = []
dsc_values = {}
prec_values = {}
rec_values = {}
acc_values = {}
median_dsc = []
    list_experiments = [dir_folder for dir_folder in sorted(glob.glob(dir_folder_experiments + '*')) if 'analysis' not in dir_folder and 'temp' not in dir_folder]
if exclude:
for experiment_folder in list_experiments:
for exclusion_case in exclude:
if exclusion_case in experiment_folder:
list_experiments.remove(experiment_folder)
for j, dir_experiment in enumerate(list_experiments):
for selection in selection_criteria:
list_results_files = [f for f in os.listdir(dir_experiment) if selection in f]
for results in list_results_files:
names_analyzed_files.append(results)
data_file = pd.read_csv(os.path.join(dir_experiment, results))
dsc_values[results] = data_file['DSC'].tolist()
median_dsc.append(np.median(data_file['DSC'].tolist()))
prec_values[results] = data_file['Precision'].tolist()
rec_values[results] = data_file['Recall'].tolist()
acc_values[results] = data_file['Accuracy'].tolist()
zipped_results = [(x, y) for x, y in sorted(zip(median_dsc, names_analyzed_files), reverse=True)]
# save x.x% top results in a list
top_list = zipped_results[:int(top_results*len(zipped_results))]
dsc_values = {pair[1]: dsc_values[pair[1]] for pair in top_list if pair[1] in dsc_values}
prec_values = {pair[1]: prec_values[pair[1]] for pair in top_list if pair[1] in prec_values}
rec_values = {pair[1]: rec_values[pair[1]] for pair in top_list if pair[1] in rec_values}
acc_values = {pair[1]: acc_values[pair[1]] for pair in top_list if pair[1] in acc_values}
print_names = [f'{extract_information_from_name(file_name)[2]} bs:{extract_information_from_name(file_name)[0]} ' \
f'lr:{extract_information_from_name(file_name)[1]} DSC: {score:.2f}' for score, file_name in top_list]
dsc_data = pd.DataFrame.from_dict(dsc_values, orient='index').T
prec_data = pd.DataFrame.from_dict(prec_values, orient='index').T
rec_data = pd.DataFrame.from_dict(rec_values, orient='index').T
acc_data = pd.DataFrame.from_dict(acc_values, orient='index').T
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
rename_headers = {element:alphabet_list[i] for i, element in enumerate(dsc_data)}
dsc_data = dsc_data.rename(rename_headers, axis=1)
prec_data = prec_data.rename(rename_headers, axis=1)
rec_data = rec_data.rename(rename_headers, axis=1)
# re-arrange the data for analysis
acc_data = acc_data.rename(rename_headers, axis=1)
dict_temp = pd.DataFrame.to_dict(acc_data)
new_dict = {}
index = 0
for element in dict_temp:
for i, sub_elem in enumerate(dict_temp[element]):
new_dict[index] = {'acc': dict_temp[element][i], 'experiment': element}
index += 1
new_acc_vals = | pd.DataFrame.from_dict(new_dict, orient='index') | pandas.DataFrame.from_dict |
from zipfile import ZipFile
import datetime
import calendar
import json
import pandas as pd
class DateNotValidException(Exception):
pass
class FeedNotValidException(Exception):
pass
REQUIRED_FILES = [
'agency.txt', 'stops.txt', 'routes.txt', 'trips.txt', 'stop_times.txt'
]
OPTIONAL_FILES = [
'calendar.txt', 'calendar_dates.txt', 'fare_attributes.txt',
'fare_rules.txt', 'shapes.txt', 'frequencies.txt', 'transfers.txt',
'pathways.txt', 'levels.txt', 'translations.txt', 'feed_info.txt',
'attributions.txt'
]
class GTFS:
"""A representation of a single static GTFS feed and associated data.
GTFS holds, as Pandas data frames, the various datasets as defined by the
GTFS static protocol (http://gtfs.org/reference/static). Optional datasets
are set to None if data is not passed.
:param agency: Transit agencies with service represented in this dataset.
:type agency: :py:mod:`pandas.DataFrame`
:param stops: Stops where vehicles pick up or drop off riders. Also defines
stations and station entrances.
:type stops: :py:mod:`pandas.DataFrame`
:param routes: Transit routes. A route is a group of trips that are
displayed to riders as a single service.
:type routes: :py:mod:`pandas.DataFrame`
:param trips: Trips for each route. A trip is a sequence of two or more
stops that occur during a specific time period.
:type trips: :py:mod:`pandas.DataFrame`
:param stop_times: Times that a vehicle arrives at and departs from stops for each trip.
:type stop_times: :py:mod:`pandas.DataFrame`
:param calendar: Service dates specified using a weekly schedule with
start and end dates. This file is required unless all dates of service
are defined in calendar_dates.txt.
:type calendar: :py:mod:`pandas.DataFrame`, conditionally required
:param calendar_dates: Exceptions for the services defined in `calendar`.
If `calendar` is omitted, then calendar_dates.txt is required and must contain all dates of service.
:type calendar_dates: :py:mod:`pandas.DataFrame`, conditionally required
:param fare_attributes: Fare information for a transit agency's routes.
:type fare_attributes: :py:mod:`pandas.DataFrame`, optional
:param fare_rules: Rules to apply fares for itineraries.
:type fare_rules: :py:mod:`pandas.DataFrame`, optional
:param shapes: Rules for mapping vehicle travel paths, sometimes referred
to as route alignments.
:type shapes: :py:mod:`pandas.DataFrame`, optional
:param frequencies: Headway (time between trips) for headway-based service
or a compressed representation of fixed-schedule service.
:type frequencies: :py:mod:`pandas.DataFrame`, optional
:param transfers: Rules for making connections at transfer points between
routes.
:type transfers: :py:mod:`pandas.DataFrame`, optional
:param pathways: Pathways linking together locations within stations.
:type pathways: :py:mod:`pandas.DataFrame`, optional
:param levels: Levels within stations.
:type levels: :py:mod:`pandas.DataFrame`, optional
:param feed_info: Dataset metadata, including publisher, version,
and expiration information.
:param translations: In regions that have multiple official languages,
transit agencies/operators typically have language-specific names and
web pages. In order to best serve riders in those regions, it is useful
for the dataset to include these language-dependent values..
:type translations: :py:mod:`pandas.DataFrame`, optional
:type feed_info: :py:mod:`pandas.DataFrame`, optional
:param attributions: Dataset attributions.
:type attributions: :py:mod:`pandas.DataFrame`, optional
:raises FeedNotValidException: An exception indicating an invalid feed.
"""
def __init__(self, agency, stops, routes, trips, stop_times,
calendar=None, calendar_dates=None, fare_attributes=None,
fare_rules=None, shapes=None, frequencies=None, transfers=None,
pathways=None, levels=None, translations=None, feed_info=None,
attributions=None):
"""Constructs and validates the datasets for the GTFS object.
All parameters should be valid Pandas DataFrame objects that follow
the structure corresponding to the dataset as defined by the GTFS
standard (http://gtfs.org/reference/static).
"""
# Mandatory Files
self.agency = agency
self.stops = stops
self.routes = routes
self.trips = trips
self.stop_times = stop_times
# Pairwise Mandatory Files
self.calendar = calendar
self.calendar_dates = calendar_dates
if self.calendar is None and self.calendar_dates is None:
raise FeedNotValidException("One of calendar or calendar_dates is required.")
# Optional Files
self.fare_attributes = fare_attributes
self.fare_rules = fare_rules
self.shapes = shapes
self.frequencies = frequencies
self.transfers = transfers
self.pathways = pathways
self.levels = levels
self.attributions = attributions
self.translations = translations
self.feed_info = feed_info
@staticmethod
def load_zip(filepath):
"""Creates a :class:`GTFS` object from a zipfile containing the
appropriate data.
:param filepath: The filepath of the zipped GTFS feed.
:type filepath: str
:return: A :class:`GTFS` object with loaded and validated data.
"""
with ZipFile(filepath, 'r') as zip_file:
# Deal with nested files
filepaths = dict()
print(REQUIRED_FILES + OPTIONAL_FILES)
for req in REQUIRED_FILES + OPTIONAL_FILES:
filepaths[req] = None
for file in zip_file.namelist():
for req in REQUIRED_FILES + OPTIONAL_FILES:
if req in file:
filepaths[req] = file
# Create pandas objects of the entire feed
agency = pd.read_csv(
zip_file.open(filepaths["agency.txt"]),
dtype={
'agency_id': str, 'agency_name': str, 'agency_url': str,
'agency_timezone': str, 'agency_lang': str,
'agency_phone': str, 'agency_fare_url': str,
'agency_email': str
},
skipinitialspace=True
)
stops = pd.read_csv(
zip_file.open(filepaths["stops.txt"]),
dtype={
'stop_id': str, 'stop_code': str, 'stop_name': str,
'stop_desc': str, 'stop_lat': float, 'stop_lon': float,
'zone_id': str, 'stop_url': str, 'location_type': 'Int64',
'parent_station': str, 'stop_timezone': str,
'wheelchair_boarding': 'Int64', 'level_id': str,
'platform_code': str
},
skipinitialspace=True
)
routes = pd.read_csv(
zip_file.open(filepaths["routes.txt"]),
dtype={
'route_id': str, 'agency_id': str, 'route_short_name': str,
'route_long_name': str, 'route_desc': str,
'route_type': int, 'route_url': str, 'route_color': str,
'route_text_color': str, 'route_short_order': int
},
skipinitialspace=True
)
trips = pd.read_csv(
zip_file.open(filepaths["trips.txt"]),
dtype={
'route_id': str, 'service_id': str, 'trip_id': str,
'trip_headsign': str, 'trip_short_name': str,
'direction_id': 'Int64', 'block_id': str, 'shape_id': str,
'wheelchair_accessible': 'Int64', 'bikes_allowed': 'Int64'
},
skipinitialspace=True
)
stop_times = pd.read_csv(
zip_file.open(filepaths["stop_times.txt"]),
dtype={
'trip_id': str, 'arrival_time': str, 'departure_time': str,
'stop_id': str, 'stop_sequence': int, 'stop_headsign': str,
'pickup_type': 'Int64', 'drop_off_type': 'Int64',
'shape_dist_traveled': float, 'timepoint': 'Int64'
},
skipinitialspace=True
)
if filepaths["calendar.txt"] in zip_file.namelist():
calendar = pd.read_csv(
zip_file.open(filepaths["calendar.txt"]),
dtype={
'service_id': str,'monday': bool, 'tuesday': bool,
'wednesday': bool, 'thursday': bool, 'friday': bool,
'saturday': bool, 'sunday': bool, 'start_date': str,
'end_date': str
},
parse_dates=['start_date', 'end_date'],
skipinitialspace=True
)
else:
calendar = None
if filepaths["calendar_dates.txt"] in zip_file.namelist():
calendar_dates = pd.read_csv(
zip_file.open(filepaths["calendar_dates.txt"]),
dtype={
'service_id': str, 'date': str, 'exception_type': int
},
parse_dates=['date'],
skipinitialspace=True
)
if calendar_dates.shape[0] == 0:
calendar_dates = None
else:
calendar_dates = None
if filepaths["fare_attributes.txt"] in zip_file.namelist():
fare_attributes = pd.read_csv(
zip_file.open(filepaths["fare_attributes.txt"]),
dtype={
'fare_id': str, 'price': float, 'currency_type': str,
'payment_method': int, 'transfers': 'Int64',
'agency_id': str, 'transfer_duration': 'Int64'
},
skipinitialspace=True
)
else:
fare_attributes = None
if filepaths["fare_rules.txt"] in zip_file.namelist():
fare_rules = pd.read_csv(
zip_file.open(filepaths["fare_rules.txt"]),
dtype={
'fare_id': str, 'route_id': str, 'origin_id': str,
'destination_id': str, 'contains_id': str
},
skipinitialspace=True
)
else:
fare_rules = None
if filepaths["shapes.txt"] in zip_file.namelist():
shapes = pd.read_csv(
zip_file.open(filepaths["shapes.txt"]),
dtype={
'shape_id': str, 'shape_pt_lat': float,
'shape_pt_lon': float, 'shape_pt_sequence': int,
'shape_dist_traveled': float
},
skipinitialspace=True
)
else:
shapes = None
if filepaths["frequencies.txt"] in zip_file.namelist():
frequencies = pd.read_csv(
zip_file.open(filepaths["frequencies.txt"]),
dtype={
'trip_id': str, 'start_time': str, 'end_time': str,
'headway_secs': int, 'exact_times': int
},
parse_dates=['start_time', 'end_time'],
skipinitialspace=True
)
else:
frequencies = None
if filepaths["transfers.txt"] in zip_file.namelist():
transfers = pd.read_csv(
zip_file.open(filepaths["transfers.txt"]),
dtype={
'from_stop_id': str, 'to_stop_id': str,
'transfer_type': 'Int64', 'min_transfer_time': 'Int64'
},
skipinitialspace=True
)
else:
transfers = None
if filepaths["pathways.txt"] in zip_file.namelist():
pathways = pd.read_csv(
zip_file.open(filepaths["pathways.txt"]),
dtype={
'pathway_id': str, 'from_stop_id': str,
'to_stop_id': str, 'pathway_mode': int,
'is_bidirectional': str, 'length': 'float64',
'traversal_time': 'Int64', 'stair_count': 'Int64',
'max_slope': 'float64', 'min_width': 'float64',
'signposted_as': str, 'reverse_signposted_as': str
},
skipinitialspace=True
)
else:
pathways = None
if filepaths["levels.txt"] in zip_file.namelist():
levels = pd.read_csv(
zip_file.open(filepaths["levels.txt"]),
dtype={
'level_id': str, 'level_index': float,
'level_name': str
},
skipinitialspace=True
)
else:
levels = None
if filepaths["translations.txt"] in zip_file.namelist():
translations = pd.read_csv(
zip_file.open(filepaths["translations.txt"]),
dtype={
'table_name': str, 'field_name': str, 'language': str,
'translation': str, 'record_id': str,
'record_sub_id': str, 'field_value': str
},
skipinitialspace=True
)
feed_info = pd.read_csv(
zip_file.open(filepaths["feed_info.txt"]),
dtype={
'feed_publisher_name': str, 'feed_publisher_url': str,
'feed_lang': str, 'default_lang': str,
'feed_start_date': str, 'feed_end_date': str,
'feed_version': str, 'feed_contact_email': str,
'feed_contact_url': str
},
skipinitialspace=True
)
elif filepaths["feed_info.txt"] in zip_file.namelist():
feed_info = pd.read_csv(
zip_file.open(filepaths["feed_info.txt"]),
dtype={
'feed_publisher_name': str, 'feed_publisher_url': str,
'feed_lang': str, 'default_lang': str,
'feed_start_date': str, 'feed_end_date': str,
'feed_version': str, 'feed_contact_email': str,
'feed_contact_url': str
},
skipinitialspace=True
)
            translations = None
else:
translations = None
feed_info = None
if filepaths["attributions.txt"] in zip_file.namelist():
attributions = pd.read_csv(
                zip_file.open(filepaths["attributions.txt"]),
dtype={
'attribution_id': str, 'agency_id': str,
'route_id': str, 'trip_id': str,
},
skipinitialspace=True
)
else:
attributions = None
return GTFS(agency, stops, routes, trips, stop_times,
calendar=calendar, calendar_dates=calendar_dates,
fare_attributes=fare_attributes, fare_rules=fare_rules,
shapes=shapes, frequencies=frequencies, transfers=transfers,
pathways=pathways, levels=levels, translations=translations,
feed_info=feed_info, attributions=attributions)
def summary(self):
""" Assemble a series of attributes summarizing the GTFS feed with the
following columns:
* *agencies*: list of agencies in feed
* *total_stops*: the total number of stops in the feed
* *total_routes*: the total number of routes in the feed
* *total_trips*: the total number of trips in the feed
* *total_stops_made*: the total number of stop_times events
* *first_date*: the first date the feed is valid for
* *last_date*: the last date the feed is valid for
* *total_shapes* (optional): the total number of shapes.
:returns: A :py:mod:`pandas.Series` containing the relevant data.
"""
summary = pd.Series(dtype=str)
summary['agencies'] = self.agency.agency_name.tolist()
summary['total_stops'] = self.stops.shape[0]
summary['total_routes'] = self.routes.shape[0]
summary['total_trips'] = self.trips.shape[0]
summary['total_stops_made'] = self.stop_times.shape[0]
if self.calendar is not None:
summary['first_date'] = self.calendar.start_date.min()
summary['last_date'] = self.calendar.end_date.max()
else:
summary['first_date'] = self.calendar_dates.date.min()
summary['last_date'] = self.calendar_dates.date.max()
if self.shapes is not None:
summary['total_shapes'] = self.shapes.shape[0]
return summary
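    # Illustrative usage (a sketch, assuming a GTFS instance named `feed` has
    # already been built from a zipped feed): the summary is a plain Series, so
    # fields can be read directly, e.g.
    #   info = feed.summary()
    #   info['total_routes'], info['first_date'], info['last_date']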
def valid_date(self, date):
"""Checks whether the provided date falls within the feed's date range
:param date: A datetime object with the date to be validated.
:type date: :py:mod:`datetime.date`
:return: `True` if the date is a valid one, `False` otherwise.
:rtype: bool
"""
summary = self.summary()
        if isinstance(date, str):
            date = datetime.datetime.strptime(date, "%Y%m%d")
        return summary.first_date <= date <= summary.last_date
def day_trips(self, date):
"""Finds all the trips that occur on a specified day. This method
accounts for exceptions included in the `calendar_dates` dataset.
:param date: The day to check
:type date: :py:mod:`datetime.date`
:return: A slice of the `trips` dataframe which corresponds to the
provided date.
:rtype: :py:mod:`pandas.DataFrame`
"""
# First, get all standard trips that run on that particular day of the week
if not self.valid_date(date):
raise DateNotValidException
dayname = date.strftime("%A").lower()
date_compare = pd.to_datetime(date)
if self.calendar is not None:
            service_ids = self.calendar[
                (self.calendar[dayname] == 1) &
                (self.calendar.start_date <= date_compare) &
                (self.calendar.end_date >= date_compare)].service_id
            if self.calendar_dates is not None:
                exceptions = self.calendar_dates[self.calendar_dates.date == date_compare]
                service_ids = service_ids.append(
                    exceptions[exceptions.exception_type == 1].service_id)
                service_ids = service_ids[~service_ids.isin(
                    exceptions[exceptions.exception_type == 2].service_id)]
        else:
            service_ids = self.calendar_dates[
                (self.calendar_dates.date == date_compare) &
                (self.calendar_dates.exception_type == 1)].service_id
return self.trips[self.trips.service_id.isin(service_ids)]
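    # Background sketch (synthetic rows, not read from a feed): in calendar_dates
    # an exception_type of 1 adds a service_id on that date and 2 removes it,
    # which is what the append/isin combination above implements, e.g.
    #   extra = pd.DataFrame({'service_id': ['HOL', 'WKDY'],
    #                         'date': pd.to_datetime(['2023-07-04'] * 2),
    #                         'exception_type': [1, 2]})
    #   extra[extra.exception_type == 1].service_id   # added on 2023-07-04
    #   extra[extra.exception_type == 2].service_id   # removed on 2023-07-04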
def stop_summary(self, date, stop_id):
"""Assemble a series of attributes summarizing a stop on a particular
day. The following columns are returned:
* *stop_id*: The ID of the stop summarized
* *total_visits*: The total number of times a stop is visited
* *first_arrival*: The earliest arrival of the bus for the day
* *last_arrival*: The latest arrival of the bus for the day
* *service_time*: The total service span, in hours
* *average_headway*: Average time in minutes between arrivals
:param date: The day to summarize
:type date: :py:mod:`datetime.date`
:param stop_id: The ID of the stop to summarize.
:type stop_id: str
:return: A :py:mod:`pandas.Series` object containing the summarized
data.
"""
# Create a summary of stops for a given stop_id
trips = self.day_trips(date)
stop_times = self.stop_times[self.stop_times.trip_id.isin(trips.trip_id) & (self.stop_times.stop_id == stop_id)]
summary = self.stops[self.stops.stop_id == stop_id].iloc[0]
summary['total_visits'] = len(stop_times.index)
summary['first_arrival'] = stop_times.arrival_time.min()
summary['last_arrival'] = stop_times.arrival_time.max()
        first_h, first_m, first_s = (int(x) for x in summary.first_arrival.split(":"))
        last_h, last_m, last_s = (int(x) for x in summary.last_arrival.split(":"))
        summary['service_time'] = (last_h + last_m / 60.0 + last_s / 3600.0) \
            - (first_h + first_m / 60.0 + first_s / 3600.0)
        summary['average_headway'] = (summary.service_time / summary.total_visits) * 60
return summary
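    # Worked example (illustrative only): GTFS times are "HH:MM:SS" strings whose
    # hour field may exceed 23 for after-midnight service, so service_time is
    # computed by converting each endpoint to decimal hours, e.g.
    #   "06:15:00" -> 6 + 15 / 60.0 + 0 / 3600.0 == 6.25
    #   "25:30:00" -> 25.5, giving a service span of 19.25 hours.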
def route_summary(self, date, route_id):
"""Assemble a series of attributes summarizing a route on a particular
day. The following columns are returned:
* *route_id*: The ID of the route summarized
* *total_trips*: The total number of trips made on the route that day
* *first_departure*: The earliest departure of the bus for the day
* *last_arrival*: The latest arrival of the bus for the day
* *service_time*: The total service span of the route, in hours
* *average_headway*: Average time in minutes between trips on the route
:param date: The day to summarize.
:type date: :py:mod:`datetime.date`
:param route_id: The ID of the route to summarize
:type route_id: str
:return: A :py:mod:`pandas.Series` object containing the summarized
data.
"""
trips = self.day_trips(date)
trips = trips[trips.route_id == route_id]
stop_times = self.stop_times[self.stop_times.trip_id.isin(trips.trip_id)]
        summary = pd.Series()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scikit_posthocs as sp
import warnings
import seaborn as sns
import statsmodels.api as sm
from bevel.linear_ordinal_regression import OrderedLogit
import scipy.stats as stats
warnings.filterwarnings("ignore")
from statsmodels.miscmodels.ordinal_model import OrderedModel
from scipy.stats import pearsonr, spearmanr
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
def reverse_personality(nombre):
if nombre == 5:
nombre = 1
elif nombre == 4:
nombre = 2
elif nombre == 2:
nombre = 4
elif nombre == 1:
nombre = 5
return nombre
def reverse(nombre):
    # Reverse-code a 7-point item: 1<->7, 2<->6, 3<->5, 4 unchanged.
    if nombre == 7:
        nombre = 1
    elif nombre == 6:
        nombre = 2
    elif nombre == 5:
        nombre = 3
    elif nombre == 3:
        nombre = 5
    elif nombre == 2:
        nombre = 6
    elif nombre == 1:
        nombre = 7
    return nombre
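# Sanity check (illustrative, not part of the original script): reverse-coding
# should be an involution, i.e. reverse(reverse(x)) == x for every x in
# range(1, 8); the same property holds for reverse_personality on the 1-5 scale.
#   assert all(reverse(reverse(x)) == x for x in range(1, 8))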
def make_dfinal(df):
    df_trick = df.copy()  # TO CHANGE
cat_vars=["Age_Range", "Personality","Discipline", "Joueur", "Gender", "CSP", "Education", "Location"]
for var in cat_vars:
cat_list='var'+'_'+var
cat_list = pd.get_dummies(df_trick[var], prefix=var)
data1=df_trick.join(cat_list)
df_trick=data1
data_vars=df_trick.columns.values.tolist()
to_keep=[i for i in data_vars if i not in cat_vars]
data_final=df_trick[to_keep]
return data_final
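# Minimal sketch of what make_dfinal does (toy data, not the survey file):
# pd.get_dummies expands each categorical column into indicator columns and the
# original categorical columns are then left out of the result, e.g.
#   toy = pd.DataFrame({'Gender': ['F', 'M', 'F'], 'Joueur': ['Oui', 'Non', 'Oui']})
#   dummies = pd.get_dummies(toy['Gender'], prefix='Gender')
#   toy.join(dummies).drop(columns=['Gender'])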
def appen_TimeWTP(li):
li.append(df['WTPTime_Souls'][ind])
li.append(df['WTPTime_Shooter'][ind])
li.append(df['WTPTime_Puzzle'][ind])
li.append(df['WTPTime_RPG'][ind])
li.append(df['WTPTime_RTS'][ind])
li.append(df['WTPTime_Survival'][ind])
li.append(df['WTPTime_Multiplayer'][ind])
def appen_WTPlay(li):
li.append(df['WTPlay_Souls'][ind])
li.append(df['WTPlay_Shooter'][ind])
li.append(df['WTPlay_Puzzle'][ind])
li.append(df['WTPlay_RPG'][ind])
li.append(df['WTPlay_RTS'][ind])
li.append(df['WTPlay_Survival'][ind])
li.append(df['WTPlay_Multiplayer'][ind])
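# Assumed usage (not shown in this excerpt): both helpers rely on the
# module-level DataFrame `df` and a loop index `ind`, so they are meant to be
# called inside a loop such as
#   for ind in df.index:
#       wtp_time, wt_play = [], []
#       appen_TimeWTP(wtp_time)
#       appen_WTPlay(wt_play)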
df = pd.read_csv("df_complet.csv")
import time
import pandas as pd
import copy
import numpy as np
from shapely import affinity
from shapely.geometry import Polygon
import geopandas as gpd
def cal_arc(p1, p2, degree=False):
dx, dy = p2[0] - p1[0], p2[1] - p1[1]
arc = np.pi - np.arctan2(dy, dx)
return arc / np.pi * 180 if degree else arc
def helper_print_with_time(*arg, sep=','):
print(time.strftime("%H:%M:%S", time.localtime()), sep.join(map(str, arg)))
def cal_euclidean(p1, p2):
return np.linalg.norm([p1[0] - p2[0], p1[1] - p2[1]])
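# Quick illustration (not part of the original module): for the unit segment
# from (0, 0) to (1, 0),
#   cal_euclidean((0, 0), (1, 0))         -> 1.0
#   cal_arc((0, 0), (1, 0), degree=True)  -> 180.0   (pi - atan2(0, 1) == pi)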
def get_shape_mbr(df_shape):
    oid = 'FID' if 'FID' in df_shape.columns else 'OBJECTID'
df_mbr = copy.deepcopy(df_shape[[oid, 'geometry']])
df_mbr.reset_index(drop=True, inplace=True)
df_mbr['geometry'] = pd.Series([geo.minimum_rotated_rectangle for geo in df_mbr['geometry']])
df_mbr['xy'] = pd.Series([list(geo.exterior.coords) for geo in df_mbr['geometry']])
#
df_mbr['x0'] = pd.Series([xy[0][0] for xy in df_mbr['xy']])
df_mbr['x1'] = pd.Series([xy[1][0] for xy in df_mbr['xy']])
df_mbr['x2'] = pd.Series([xy[2][0] for xy in df_mbr['xy']])
df_mbr['y0'] = pd.Series([xy[0][1] for xy in df_mbr['xy']])
df_mbr['y1'] = pd.Series([xy[1][1] for xy in df_mbr['xy']])
df_mbr['y2'] = pd.Series([xy[2][1] for xy in df_mbr['xy']])
#
df_mbr['l1'] = pd.Series(
[cal_euclidean([x0, y0], [x1, y1]) for x0, y0, x1, y1 in df_mbr[['x0', 'y0', 'x1', 'y1']].values])
df_mbr['l2'] = pd.Series(
[cal_euclidean([x0, y0], [x1, y1]) for x0, y0, x1, y1 in df_mbr[['x1', 'y1', 'x2', 'y2']].values])
df_mbr['a1'] = pd.Series(
[cal_arc([x0, y0], [x1, y1], True) for x0, y0, x1, y1 in df_mbr[['x0', 'y0', 'x1', 'y1']].values])
df_mbr['a2'] = pd.Series(
[cal_arc([x0, y0], [x1, y1], True) for x0, y0, x1, y1 in df_mbr[['x1', 'y1', 'x2', 'y2']].values])
#
df_mbr['longer'] = df_mbr['l1'] >= df_mbr['l2']
#
df_mbr['lon_len'] = pd.Series([l1 if longer else l2 for l1, l2, longer in df_mbr[['l1', 'l2', 'longer']].values])
df_mbr['short_len'] = pd.Series([l2 if longer else l1 for l1, l2, longer in df_mbr[['l1', 'l2', 'longer']].values])
df_mbr['lon_arc'] = pd.Series([a1 if longer else a2 for a1, a2, longer in df_mbr[['a1', 'a2', 'longer']].values])
    df_mbr['short_arc'] = pd.Series([a2 if longer else a1 for a1, a2, longer in df_mbr[['a1', 'a2', 'longer']].values])
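# Illustrative sketch (separate from get_shape_mbr above): the corner
# coordinates and edge lengths used there come straight from shapely's minimum
# rotated rectangle, e.g.
#   poly = Polygon([(0, 0), (4, 0), (4, 2), (0, 2)])
#   mbr = poly.minimum_rotated_rectangle
#   list(mbr.exterior.coords)   # five corner points, first == last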
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
        index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
        index = IntervalIndex.from_breaks(breaks, closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
    def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
        assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
        # Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
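    # Illustrative note (not part of the original test suite): with closed='both',
    # adjacent intervals built from common breaks share an endpoint and therefore
    # overlap, e.g. 1 in Interval(0, 1, closed='both') and
    # 1 in Interval(1, 2, closed='both') are both True, which is why the 'both'
    # branch above expects False.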
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timedelta(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timedelta('7 days 1 hour')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
# float value for periods
expected = pd.interval_range(start=0, periods=10)
result = pd.interval_range(start=0, periods=10.5)
tm.assert_index_equal(result, expected)
# equivalent timestamp-like start/end
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pydatetime(),
end=end.to_pydatetime())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timestamp
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
DateOffset(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
# equivalent timedelta-like start/end
start, end = Timedelta(days=1), Timedelta(days=10)
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pytimedelta(),
end=end.to_pytimedelta())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timedelta
        equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
                      DateOffset(days=1)]
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep="last")
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep="last")
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize(
"freq",
[
"A",
"2A",
"-2A",
"Q",
"-1Q",
"M",
"-1M",
"D",
"3D",
"-3D",
"W",
"-1W",
"H",
"2H",
"-2H",
"T",
"2T",
"S",
"-3S",
],
)
def test_infer_freq(self, freq):
# GH 11018
idx = pd.date_range("2011-01-01 09:00:00", freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(["2011-01-01", "NaT"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz="US/Pacific")
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(["20180101", "20180103", "20180105"])
# setting with an incompatible freq
msg = (
"Inferred frequency 2D from passed values does not conform to "
"passed frequency 5D"
)
with pytest.raises(ValueError, match=msg):
idx.freq = "5D"
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx.freq = "foo"
def test_offset_deprecated(self):
# GH 20716
idx = pd.DatetimeIndex(["20180101", "20180102"])
# getter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset
# setter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset = BDay()
class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.freq == self.rng.freq
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=BDay())
assert shifted[0] == rng[0] + BDay()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename("foo")
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename("foo")
assert t1.identical(t2)
# freq
t2v = | Index(t2.values) | pandas.Index |
import sqlite3
import pandas as pd
import numpy as np
from pandas import Series, DataFrame
#@Author: <NAME>
#@Version: 1.0
#@Description: Function to show the odds history for two teams
def getOddsHistoryByTeam(team1_id,team2_id):
db_con = sqlite3.connect("database.sqlite")
Liga_match_history = pd.read_sql_query("select season,home_team_api_id,away_team_api_id,B365H,B365D,B365A from Match where home_team_api_id= %s and away_team_api_id= %s" % (team1_id,team2_id), db_con)
season_list = ['2015/2016']
Liga_match_history = Liga_match_history[Liga_match_history.season.isin(season_list)]
print("---------------History---------------------")
print(Liga_match_history)
print("---------------History---------------------")
#@Description: Function to return the team power by team_api_id
def getTeamsPower(team1_id,team2_id):
spanish_liga_2016_team_id = ['8315','9906','8634','9910','9783','8372','8558','8305','7878','8306','8581','9864','8370','8603','8633','8560','8302','9869','10267','10205']
db_con = sqlite3.connect("database.sqlite")
teams_prop = | pd.read_sql_query("SELECT team_api_id, date,buildUpPlaySpeed,chanceCreationShooting,defenceAggression from Team_Attributes", db_con) | pandas.read_sql_query |
from scseirx.model_school import SEIRX_school
import scseirx.analysis_functions as af
import pandas as pd
import numpy as np
import networkx as nx
from os.path import join
from scipy.stats import spearmanr, pearsonr
def weibull_two_param(shape, scale):
'''
    Scales a Weibull distribution that is defined solely by its shape.
'''
return scale * np.random.weibull(shape)
def get_epi_params():
'''
Gets a combination of exposure duration, time until symptom onset and
infection duration that satisfies all conditions.
'''
# scale and shape of Weibull distributions defined by the following means
# and variances
# exposure_duration = [5, 1.9] / days
# time_until_symptoms = [6.4, 0.8] / days
# infection_duration = [10.91, 3.95] / days
epi_params = {
'exposure_duration': [2.8545336526034513, 5.610922825244271],
'time_until_symptoms': [9.602732979535194, 6.738998146675984],
'infection_duration': [3.012881111335679, 12.215213280459125]}
tmp_epi_params = {}
# iterate until a combination that fulfills all conditions is found
while True:
for param_name, param in epi_params.items():
tmp_epi_params[param_name] = \
round(weibull_two_param(param[0], param[1]))
# conditions
if tmp_epi_params['exposure_duration'] > 0 and \
tmp_epi_params['time_until_symptoms'] >= \
tmp_epi_params['exposure_duration'] and\
tmp_epi_params['infection_duration'] > \
tmp_epi_params['exposure_duration']:
return tmp_epi_params
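# Illustrative sketch (not part of the original module): drawing a few epidemiological
# parameter sets and collecting them in a data frame. Only the functions defined above
# are used; the seed is arbitrary and only fixes the example's output.
def _example_epi_param_draws(n_draws=3):
    np.random.seed(42)
    draws = [get_epi_params() for _ in range(n_draws)]
    # each draw satisfies exposure_duration <= time_until_symptoms and
    # exposure_duration < infection_duration, by construction
    return pd.DataFrame(draws)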
def calculate_distribution_difference(school_type, ensemble_results, \
outbreak_sizes):
'''
Calculates the difference between the expected distribution of outbreak
sizes and the observed outbreak sizes in an ensemble of simulation runs
with the same parameters. The data-frame ensemble_results holds the number
of infected students and the number of infected teachers. NOTE: the index
case is already subtracted from these numbers.
Parameters
----------
school_type : string
school type for which the distribution difference should be calculated.
Can be "primary", "primary_dc", "lower_secondary", "lower_secondary_dc",
"upper_secondary", "secondary" or "secondary_dc"
ensemble_results : pandas DataFrame
Data frame holding the results of the simulated outbreaks for a given
school type and parameter combination. The outbreak size has to be given
in the column "infected_total".
outbreak_size : pandas DataFrame
Data frame holding the empirical outbreak size observations. The
outbreak size has to be given in the column "size", the school type in
the column "type".
Returns
-------
chi_2_distance : float
chi-squared distance between the simulated and empirically observed
outbreak size distributions
sum_of_squares : float
sum of squared differences between the simulated and empirically
observed outbreak size distributions.
'''
# calculate the total number of follow-up cases (outbreak size)
ensemble_results['infected_total'] = ensemble_results['infected_teachers'] +\
ensemble_results['infected_students']
ensemble_results = ensemble_results.astype(int)
# censor runs with no follow-up cases as we also do not observe these in the
# empirical data
ensemble_results = ensemble_results[ensemble_results['infected_total'] > 0].copy()
observed_outbreaks = ensemble_results['infected_total'].value_counts()
observed_outbreaks = observed_outbreaks / observed_outbreaks.sum()
obs_dict = {size:ratio for size, ratio in zip(observed_outbreaks.index,
observed_outbreaks.values)}
# since we only have aggregated data for schools with and without daycare,
# we map the daycare school types to their corresponding non-daycare types,
    # which are also the labels of the schools in the empirical data
type_map = {'primary':'primary', 'primary_dc':'primary',
'lower_secondary':'lower_secondary',
'lower_secondary_dc':'lower_secondary',
'upper_secondary':'upper_secondary',
'secondary':'secondary', 'secondary_dc':'secondary'}
school_type = type_map[school_type]
expected_outbreaks = outbreak_sizes[\
outbreak_sizes['type'] == school_type].copy()
expected_outbreaks.index = expected_outbreaks['size']
exp_dict = {s:c for s, c in zip(expected_outbreaks.index,
expected_outbreaks['ratio'])}
# add zeroes for both the expected and observed distributions in cases
# (sizes) that were not observed
if len(observed_outbreaks) == 0:
obs_max = 0
else:
obs_max = observed_outbreaks.index.max()
for i in range(1, max(obs_max + 1,
expected_outbreaks.index.max() + 1)):
if i not in observed_outbreaks.index:
obs_dict[i] = 0
if i not in expected_outbreaks.index:
exp_dict[i] = 0
obs = np.asarray([obs_dict[i] for i in range(1, len(obs_dict) + 1)])
exp = np.asarray([exp_dict[i] for i in range(1, len(exp_dict) + 1)])
chi2_distance = ((exp + 1) - (obs + 1))**2 / (exp + 1)
chi2_distance = chi2_distance.sum()
sum_of_squares = ((exp - obs)**2).sum()
return chi2_distance, sum_of_squares
def calculate_group_case_difference(school_type, ensemble_results,\
group_distributions):
'''
Calculates the difference between the expected number of infected teachers
/ infected students and the observed number of infected teachers / students
in an ensemble of simulation runs with the same parameters. The data-frame
ensemble_results holds the number of infected students and the number of
infected teachers. NOTE: the index case is already subtracted from these
numbers.
Parameters
----------
school_type : string
school type for which the distribution difference should be calculated.
Can be "primary", "primary_dc", "lower_secondary", "lower_secondary_dc",
"upper_secondary", "secondary" or "secondary_dc"
ensemble_results : pandas DataFrame
Data frame holding the results of the simulated outbreaks for a given
school type and parameter combination. The outbreak size has to be given
in the column "infected_total".
group_distributions : pandas DataFrame
Data frame holding the empirical observations of the ratio of infections
in a given group (student, teacher) as compared to the overall number of
infections (students + teachers). The data frame has three columns:
"school_type", "group" and "ratio", where "group" indicates which group
(student or teacher) the number in "ratio" belongs to.
Returns
-------
chi_2_distance : float
chi-squared distance between the simulated and empirically observed
outbreak size distributions
sum_of_squares : float
sum of squared differences between the simulated and empirically
observed outbreak size distributions.
'''
# calculate the total number of follow-up cases (outbreak size)
ensemble_results['infected_total'] = ensemble_results['infected_teachers'] +\
ensemble_results['infected_students']
# censor runs with no follow-up cases as we also do not observe these in the
# empirical data
ensemble_results = ensemble_results[ensemble_results['infected_total'] > 0].copy()
# calculate ratios of infected teachers and students
ensemble_results['teacher_ratio'] = ensemble_results['infected_teachers'] / \
ensemble_results['infected_total']
ensemble_results['student_ratio'] = ensemble_results['infected_students'] / \
ensemble_results['infected_total']
observed_distro = pd.DataFrame({'group':['student', 'teacher'],
'ratio':[ensemble_results['student_ratio'].mean(),
ensemble_results['teacher_ratio'].mean()]})
# since we only have aggregated data for schools with and without daycare,
# we map the daycare school types to their corresponding non-daycare types,
    # which are also the labels of the schools in the empirical data
type_map = {'primary':'primary', 'primary_dc':'primary',
'lower_secondary':'lower_secondary',
'lower_secondary_dc':'lower_secondary',
'upper_secondary':'upper_secondary',
'secondary':'secondary', 'secondary_dc':'secondary'}
school_type = type_map[school_type]
expected_distro = group_distributions[\
group_distributions['type'] == school_type].copy()
expected_distro.index = expected_distro['group']
obs = observed_distro['ratio'].values
exp = expected_distro['ratio'].values
chi2_distance = ((exp + 1) - (obs + 1))**2 / (exp + 1)
chi2_distance = chi2_distance.sum()
sum_of_squares = ((exp - obs)**2).sum()
return chi2_distance, sum_of_squares
def get_outbreak_size_pdf(school_type, ensemble_results, outbreak_sizes):
'''
    Extracts the discrete probability density function of outbreak sizes from
the simulated and empirically measured outbreaks.
Parameters:
-----------
school_type : string
school type for which the distribution difference should be calculated.
Can be "primary", "primary_dc", "lower_secondary", "lower_secondary_dc",
"upper_secondary", "secondary" or "secondary_dc"
ensemble_results : pandas DataFrame
Data frame holding the results of the simulated outbreaks for a given
school type and parameter combination. The outbreak size has to be given
in the column "infected_total".
outbreak_size : pandas DataFrame
Data frame holding the empirical outbreak size observations. The
outbreak size has to be given in the column "size", the school type in
the column "type".
Returns:
--------
simulation_pdf : numpy 1-d array
Discrete probability density function of outbreak sizes from simulations
empirical_pdf : numpy 1-d array
Discrete probability density function of empirically observed outbreak
sizes.
'''
# censor runs with no follow-up cases as we also do not observe these in the
# empirical data
ensemble_results = ensemble_results[ensemble_results['infected_total'] > 0].copy()
obs = ensemble_results['infected_total'].value_counts()
obs = obs / obs.sum()
obs_dict = {size:ratio for size, ratio in zip(obs.index, obs.values)}
# since we only have aggregated data for schools with and without daycare,
# we map the daycare school types to their corresponding non-daycare types,
    # which are also the labels of the schools in the empirical data
type_map = {'primary':'primary', 'primary_dc':'primary',
'lower_secondary':'lower_secondary',
'lower_secondary_dc':'lower_secondary',
'upper_secondary':'upper_secondary',
'secondary':'secondary', 'secondary_dc':'secondary'}
school_type = type_map[school_type]
expected_outbreaks = outbreak_sizes[\
outbreak_sizes['type'] == school_type].copy()
expected_outbreaks.index = expected_outbreaks['size']
exp_dict = {s:c for s, c in zip(range(1, expected_outbreaks.index.max() + 1),
expected_outbreaks['ratio'])}
# add zeroes for both the expected and observed distributions in cases
# (sizes) that were not observed
if len(obs) == 0:
obs_max = 0
else:
obs_max = obs.index.max()
for i in range(1, max(obs_max + 1,
expected_outbreaks.index.max() + 1)):
if i not in obs.index:
obs_dict[i] = 0
if i not in expected_outbreaks.index:
exp_dict[i] = 0
simulation_pdf = np.asarray([obs_dict[i] for i in range(1, len(obs_dict) + 1)])
empirical_pdf = np.asarray([exp_dict[i] for i in range(1, len(exp_dict) + 1)])
return simulation_pdf, empirical_pdf
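# Illustrative sketch with invented toy numbers (not calibration data): shows the input
# layout expected by get_outbreak_size_pdf and the shape of its return values.
def _example_outbreak_size_pdf():
    toy_ensemble = pd.DataFrame({'infected_total': [0, 1, 1, 2, 3]})
    toy_empirical = pd.DataFrame({'type': ['primary'] * 3,
                                  'size': [1, 2, 3],
                                  'ratio': [0.6, 0.3, 0.1]})
    # returns two aligned discrete pdfs over outbreak sizes 1..3
    return get_outbreak_size_pdf('primary', toy_ensemble, toy_empirical)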
def get_outbreak_size_pdf_groups(school_type, ensemble_results, outbreak_sizes,
group_distributions):
'''
    Extracts the discrete probability density function of outbreak sizes from
the simulated and empirically measured outbreaks divided into separate pdfs
for students and teachers.
Parameters:
-----------
school_type : string
school type for which the distribution difference should be calculated.
Can be "primary", "primary_dc", "lower_secondary", "lower_secondary_dc",
"upper_secondary", "secondary" or "secondary_dc"
ensemble_results : pandas DataFrame
Data frame holding the results of the simulated outbreaks for a given
school type and parameter combination. The outbreak size has to be given
in the column "infected_total".
outbreak_size : pandas DataFrame
Data frame holding the empirical outbreak size observations. The
outbreak size has to be given in the column "size", the school type in
the column "type".
group_distributions : pandas DataFrame
Data frame holding the empirical observations of the ratio of infections
in a given group (student, teacher) as compared to the overall number of
infections (students + teachers). The data frame has three columns:
"school_type", "group" and "ratio", where "group" indicates which group
(student or teacher) the number in "ratio" belongs to.
Returns:
--------
simulation_pdf_student : numpy 1-d array
Discrete probability density function of outbreak sizes from simulations
for students.
simulation_pdf_teacher : numpy 1-d array
Discrete probability density function of outbreak sizes from simulations
for teachers.
empirical_pdf_student : numpy 1-d array
Discrete probability density function of empirically observed outbreak
sizes for students.
empirical_pdf_teacher : numpy 1-d array
Discrete probability density function of empirically observed outbreak
sizes for teachers.
'''
# censor runs with no follow-up cases as we also do not observe these in the
# empirical data
ensemble_results_student = ensemble_results[ensemble_results['infected_students'] > 0].copy()
ensemble_results_teacher = ensemble_results[ensemble_results['infected_teachers'] > 0].copy()
obs_student = ensemble_results_student['infected_students'].value_counts()
obs_student = obs_student / obs_student.sum()
obs_teacher = ensemble_results_teacher['infected_teachers'].value_counts()
obs_teacher = obs_teacher / obs_teacher.sum()
obs_student_dict = {size:ratio for size, ratio in \
zip(obs_student.index, obs_student.values)}
obs_teacher_dict = {size:ratio for size, ratio in \
zip(obs_teacher.index, obs_teacher.values)}
# since we only have aggregated data for schools with and without daycare,
# we map the daycare school types to their corresponding non-daycare types,
    # which are also the labels of the schools in the empirical data
type_map = {'primary':'primary', 'primary_dc':'primary',
'lower_secondary':'lower_secondary',
'lower_secondary_dc':'lower_secondary',
'upper_secondary':'upper_secondary',
'secondary':'secondary', 'secondary_dc':'secondary'}
school_type = type_map[school_type]
expected_outbreaks = outbreak_sizes[\
outbreak_sizes['type'] == school_type].copy()
expected_outbreaks.index = expected_outbreaks['size']
exp_student_dict = {s:c for s, c in zip(range(1, \
expected_outbreaks.index.max() + 1), expected_outbreaks['ratio'])}
exp_teacher_dict = {s:c for s, c in zip(range(1, \
expected_outbreaks.index.max() + 1), expected_outbreaks['ratio'])}
# add zeroes for both the expected and observed distributions in cases
# (sizes) that were not observed
if len(obs_student) == 0:
obs_student_max = 0
else:
obs_student_max = obs_student.index.max()
if len(obs_teacher) == 0:
obs_teacher_max = 0
else:
obs_teacher_max = obs_teacher.index.max()
for i in range(1, max(obs_student_max + 1,
expected_outbreaks.index.max() + 1)):
if i not in obs_student.index:
obs_student_dict[i] = 0
if i not in expected_outbreaks.index:
exp_student_dict[i] = 0
for i in range(1, max(obs_teacher_max + 1,
expected_outbreaks.index.max() + 1)):
if i not in obs_teacher.index:
obs_teacher_dict[i] = 0
if i not in expected_outbreaks.index:
exp_teacher_dict[i] = 0
# the normalization of the probability density function of infected students
# and teachers from simulations is such that
# \int (f(x, student) + f(x, teacher)) dx = 1, where x = cluster size.
# We therefore need to ensure the normalization of the empirically observed
# pdfs is the same, to be able to compare it to the pdf from the simulations.
# We do this by multiplying the pdf with the empirically observed ratios of
# infected students and teachers.
simulation_group_distribution_pdf, empirical_group_distribution_pdf = \
get_group_case_pdf(school_type, ensemble_results, group_distributions)
simulation_student_ratio, simulation_teacher_ratio = simulation_group_distribution_pdf
empirical_student_ratio, empirical_teacher_ratio = empirical_group_distribution_pdf
simulation_student_pdf = np.asarray([obs_student_dict[i] for \
i in range(1, len(obs_student_dict) + 1)]) * simulation_student_ratio
empirical_student_pdf = np.asarray([exp_student_dict[i] for \
i in range(1, len(exp_student_dict) + 1)]) * empirical_student_ratio
simulation_teacher_pdf = np.asarray([obs_teacher_dict[i] for \
i in range(1, len(obs_teacher_dict) + 1)]) * simulation_teacher_ratio
empirical_teacher_pdf = np.asarray([exp_teacher_dict[i] for \
i in range(1, len(exp_teacher_dict) + 1)]) * empirical_teacher_ratio
return simulation_student_pdf, simulation_teacher_pdf, \
empirical_student_pdf, empirical_teacher_pdf
def get_group_case_pdf(school_type, ensemble_results, group_distributions):
'''
Extracts the ratios of simulated and empirically observed infected teachers
and infected students for a given simulation parameter combination.
Parameters
----------
school_type : string
school type for which the distribution difference should be calculated.
Can be "primary", "primary_dc", "lower_secondary", "lower_secondary_dc",
"upper_secondary", "secondary" or "secondary_dc"
ensemble_results : pandas DataFrame
Data frame holding the results of the simulated outbreaks for a given
school type and parameter combination. The outbreak size has to be given
in the column "infected_total".
group_distributions : pandas DataFrame
Data frame holding the empirical observations of the ratio of infections
in a given group (student, teacher) as compared to the overall number of
infections (students + teachers). The data frame has three columns:
"school_type", "group" and "ratio", where "group" indicates which group
(student or teacher) the number in "ratio" belongs to.
Returns:
--------
simulation_pdf : numpy 1-d array
Discrete probability density function of outbreak sizes from simulations
empirical_pdf : numpy 1-d array
Discrete probability density function of empirically observed outbreak
sizes.
'''
# censor runs with no follow-up cases as we also do not observe these in the
# empirical data
ensemble_results = ensemble_results[ensemble_results['infected_total'] > 0].copy()
# calculate ratios of infected teachers and students
ensemble_results['teacher_ratio'] = ensemble_results['infected_teachers'] / \
ensemble_results['infected_total']
ensemble_results['student_ratio'] = ensemble_results['infected_students'] / \
ensemble_results['infected_total']
observed_distro = pd.DataFrame(\
{'group':['student', 'teacher'],
'ratio':[ensemble_results['student_ratio'].mean(),
ensemble_results['teacher_ratio'].mean()]})
observed_distro = observed_distro.set_index('group')
# since we only have aggregated data for schools with and without daycare,
# we map the daycare school types to their corresponding non-daycare types,
    # which are also the labels of the schools in the empirical data
type_map = {'primary':'primary', 'primary_dc':'primary',
'lower_secondary':'lower_secondary',
'lower_secondary_dc':'lower_secondary',
'upper_secondary':'upper_secondary',
'secondary':'secondary', 'secondary_dc':'secondary'}
school_type = type_map[school_type]
expected_distro = group_distributions[\
group_distributions['type'] == school_type].copy()
expected_distro.index = expected_distro['group']
simulation_pdf = np.asarray([observed_distro['ratio']['student'],
observed_distro['ratio']['teacher']])
empirical_pdf = np.asarray([expected_distro['ratio']['student'],
expected_distro['ratio']['teacher']])
return simulation_pdf, empirical_pdf
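# Illustrative sketch with invented toy numbers: get_group_case_pdf reduces an ensemble
# to the mean student/teacher ratios and pairs them with the empirical ratios.
def _example_group_case_pdf():
    toy_ensemble = pd.DataFrame({'infected_students': [2, 1, 3],
                                 'infected_teachers': [0, 1, 1],
                                 'infected_total': [2, 2, 4]})
    toy_groups = pd.DataFrame({'type': ['primary', 'primary'],
                               'group': ['student', 'teacher'],
                               'ratio': [0.75, 0.25]})
    return get_group_case_pdf('primary', toy_ensemble, toy_groups)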
def calculate_chi2_distance(simulation_pdf, empirical_pdf):
'''
Calculates the Chi-squared distance between the expected distribution of
outbreak sizes and the observed outbreak sizes in an ensemble of simulation
runs with the same parameters.
Parameters:
-----------
simulation_pdf : numpy 1-d array
Discrete probability density function of the outbreak sizes observed in
the simulations. The index case needs to be subtracted from the pdf and
the pdf should be censored at 0 (as outbreaks of size 0 can not be
observed empirically).
empirical_pdf : numpy 1-d array
Discrete probability density function of the outbreak sizes observed in
schools. Index cases are NOT included in outbreak sizes.
Returns
-------
chi_2_distance : float
Chi-squared distance between the simulated and empirically observed
outbreak size distributions
'''
chi2_distance = ((empirical_pdf + 1) - (simulation_pdf + 1))**2 / \
(empirical_pdf + 1)
chi2_distance = chi2_distance.sum()
return chi2_distance
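# Tiny worked example (illustrative only): for simulation_pdf = [0.5, 0.5, 0.0] and
# empirical_pdf = [0.6, 0.3, 0.1] the shifted chi-squared distance is
# 0.1**2/1.6 + 0.2**2/1.3 + 0.1**2/1.1, i.e. roughly 0.046.
def _example_chi2_distance():
    return calculate_chi2_distance(np.asarray([0.5, 0.5, 0.0]),
                                   np.asarray([0.6, 0.3, 0.1]))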
def calculate_sum_of_squares_distance(simulation_pdf, empirical_pdf):
'''
Calculates the sum of squared distances between the expected distribution of
outbreak sizes and the observed outbreak sizes in an ensemble of simulation
runs with the same parameters.
Parameters:
-----------
simulation_pdf : numpy 1-d array
Discrete probability density function of the outbreak sizes observed in
the simulations. The index case needs to be subtracted from the pdf and
the pdf should be censored at 0 (as outbreaks of size 0 can not be
observed empirically).
empirical_pdf : numpy 1-d array
Discrete probability density function of the outbreak sizes observed in
schools. Index cases are NOT included in outbreak sizes.
Returns:
--------
sum_of_squares : float
sum of squared differences between the simulated and empirically
observed outbreak size distributions.
'''
sum_of_squares = ((empirical_pdf - simulation_pdf)**2).sum()
return sum_of_squares
def calculate_qq_regression_slope(simulation_pdf, empirical_pdf):
'''
Calculates the slope of a linear fit with intercept=0 to the qq plot of the
probability density function of the simulated values versus the pdf of the
    empirically observed values. The quantile spacing is chosen to be 1/N,
    where N is the length of the simulated outbreak size pdf.
Returns the absolute value of the difference between the slope of the fit
and a (perfect) slope of 1.
Parameters:
-----------
simulation_pdf : numpy 1-d array
Discrete probability density function of the outbreak sizes observed in
the simulations. The index case needs to be subtracted from the pdf and
the pdf should be censored at 0 (as outbreaks of size 0 can not be
observed empirically).
empirical_pdf : numpy 1-d array
Discrete probability density function of the outbreak sizes observed in
schools. Index cases are NOT included in outbreak sizes.
Returns:
--------
    a : float
        Absolute difference between 1 and the slope of the linear regression
        with intercept = 0 through the qq-plot of the simulated vs. the
        empirical discrete pdf.
'''
quant = 1 / len(simulation_pdf)
simulation_quantiles = np.quantile(simulation_pdf, np.arange(0, 1, quant))
empirical_quantiles = np.quantile(empirical_pdf, np.arange(0, 1, quant))
a, _, _, _ = np.linalg.lstsq(simulation_quantiles[:, np.newaxis], empirical_quantiles,
rcond=None)
return np.abs(1 - a[0])
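# Quick illustrative check: identical pdfs have identical quantiles, so the fitted
# qq slope is exactly 1 and the returned distance is 0.
def _example_qq_slope():
    pdf = np.asarray([0.5, 0.3, 0.2])
    return calculate_qq_regression_slope(pdf, pdf)  # -> 0.0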
def calculate_pp_regression_slope(obs_cdf, exp_cdf):
'''
Calculates the slope of a linear fit with intercept=0 to the pp plot of the
cumulative probability density function of the simulated values versus the
cdf of the empirically observed values. Returns the absolute value of the
difference between the slope of the fit and a (perfect) slope of 1.
Parameters:
-----------
    obs_cdf : numpy 1-d array
Discrete cumulative probability density function of the outbreak sizes
observed in the simulations. The index case needs to be subtracted from
the pdf before the cdf is calculated, and the pdf should be censored at
0 (as outbreaks of size 0 can not be observed empirically).
    exp_cdf : numpy 1-d array
Discrete cumulative probability density function of the outbreak sizes
observed in schools. Index cases are NOT included in the outbreak size
pdf from which the cdf was calculated.
Returns:
--------
    a : float
        Absolute difference between 1 and the slope of the linear regression
        with intercept = 0 through the pp-plot of the simulated vs. the
        empirical discrete cdf.
'''
a, _, _, _ = np.linalg.lstsq(obs_cdf[:, np.newaxis], exp_cdf,
rcond=None)
return np.abs(1 - a[0])
def calculate_bhattacharyya_distance(p, q):
'''
Calculates the Bhattacharyya distance between the discrete probability
density functions p and q.
    See also https://en.wikipedia.org/wiki/Bhattacharyya_distance.
Parameters:
-----------
p, q : numpy 1-d array
Discrete probability density function.
Returns:
--------
DB : float
Bhattacharyya distance between the discrete probability
density functions p and q.
'''
BC = np.sqrt(p * q).sum()
DB = - np.log(BC)
return DB
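# Illustrative check (invented numbers): identical pdfs give a Bhattacharyya distance
# of (numerically) 0, and the distance grows as the two distributions overlap less.
def _example_bhattacharyya():
    p = np.asarray([0.5, 0.3, 0.2])
    q = np.asarray([0.1, 0.2, 0.7])
    return calculate_bhattacharyya_distance(p, p), calculate_bhattacharyya_distance(p, q)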
def calculate_distances_two_distributions(ensemble_results, school_type,
intermediate_contact_weight, far_contact_weight,
age_transmission_discount, outbreak_sizes, group_distributions):
sim_stud_pdf, sim_teach_pdf, emp_stud_pdf, emp_teach_pdf = \
get_outbreak_size_pdf_groups(school_type, ensemble_results,\
outbreak_sizes, group_distributions)
chi2_distance_student = calculate_chi2_distance(sim_stud_pdf, emp_stud_pdf)
chi2_distance_teacher = calculate_chi2_distance(sim_teach_pdf, emp_teach_pdf)
row = {
'school_type':school_type,
'intermediate_contact_weight':intermediate_contact_weight,
'far_contact_weight':far_contact_weight,
'age_transmission_discount':age_transmission_discount,
'chi2_distance_student':chi2_distance_student,
'chi2_distance_teacher':chi2_distance_teacher,
}
return row
def calculate_distances(ensemble_results, school_type, intermediate_contact_weight,
far_contact_weight, age_transmission_discount,
outbreak_size, group_distribution):
# calculate the Chi-squared distance and the sum of squared differences
# between the simulated and empirically observed ratios of teacher- and
# student cases
simulation_group_distribution_pdf, empirical_group_distribution_pdf = \
get_group_case_pdf(school_type, ensemble_results, group_distribution)
chi2_distance_distro = calculate_chi2_distance(\
simulation_group_distribution_pdf, empirical_group_distribution_pdf)
sum_of_squares_distro = calculate_sum_of_squares_distance(\
simulation_group_distribution_pdf, empirical_group_distribution_pdf)
# calculate various distance measures between the simulated and empirically
# observed outbreak size distributions
simulation_outbreak_size_pdf, empirical_outbreak_size_pdf = \
get_outbreak_size_pdf(school_type, ensemble_results, outbreak_size)
simulation_outbreak_size_cdf = simulation_outbreak_size_pdf.cumsum()
empirical_outbreak_size_cdf = empirical_outbreak_size_pdf.cumsum()
# Chi-squared distance
chi2_distance_size = calculate_chi2_distance(simulation_outbreak_size_pdf,
empirical_outbreak_size_pdf)
# sum of squared differences
sum_of_squares_size = calculate_sum_of_squares_distance(\
simulation_outbreak_size_pdf, empirical_outbreak_size_pdf)
# Bhattacharyya distance between the probability density functions
bhattacharyya_distance_size = calculate_bhattacharyya_distance(\
simulation_outbreak_size_pdf, empirical_outbreak_size_pdf)
# Pearson correlation between the cumulative probability density functions
pearsonr_size = np.abs(1 - pearsonr(simulation_outbreak_size_cdf,
empirical_outbreak_size_cdf)[0])
# Spearman correlation between the cumulative probability density functions
spearmanr_size = np.abs(1 - spearmanr(simulation_outbreak_size_cdf,
empirical_outbreak_size_cdf)[0])
# Slope of the qq-plot with 0 intercept
qq_slope_size = calculate_qq_regression_slope(simulation_outbreak_size_pdf,
empirical_outbreak_size_pdf)
# Slope of the pp-plot with 0 intercept
    pp_slope_size = calculate_pp_regression_slope(simulation_outbreak_size_cdf,
                                                  empirical_outbreak_size_cdf)
row = {
'school_type':school_type,
'intermediate_contact_weight':intermediate_contact_weight,
'far_contact_weight':far_contact_weight,
'age_transmission_discount':age_transmission_discount,
'chi2_distance_distro':chi2_distance_distro,
'sum_of_squares_distro':sum_of_squares_distro,
'chi2_distance_size':chi2_distance_size,
'sum_of_squares_size':sum_of_squares_size,
'bhattacharyya_distance_size':bhattacharyya_distance_size,
'pearsonr_difference_size':pearsonr_size,
'spearmanr_difference_size':spearmanr_size,
'qq_difference_size':qq_slope_size,
'pp_difference_size':pp_slope_size,
}
return row
def compose_agents(prevention_measures):
'''
Utility function to compose agent dictionaries as expected by the simulation
model as input from the dictionary of prevention measures.
Parameters
----------
prevention_measures : dictionary
Dictionary of prevention measures. Needs to include the fields
(student, teacher, family_member) _screen_interval, index_probability
and _mask.
Returns
-------
agent_types : dictionary of dictionaries
Dictionary containing the fields "screening_interval",
"index_probability" and "mask" for the agent groups "student", "teacher"
and "family_member".
'''
agent_types = {
'student':{
'screening_interval':prevention_measures['student_screen_interval'],
'index_probability':prevention_measures['student_index_probability'],
'mask':prevention_measures['student_mask']},
'teacher':{
'screening_interval': prevention_measures['teacher_screen_interval'],
            'index_probability': prevention_measures['teacher_index_probability'],
'mask':prevention_measures['teacher_mask']},
'family_member':{
'screening_interval':prevention_measures['family_member_screen_interval'],
'index_probability':prevention_measures['family_member_index_probability'],
'mask':prevention_measures['family_member_mask']}
}
return agent_types
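# Illustrative sketch of the measure keys consumed by compose_agents; the placeholder
# values below are invented and do not correspond to any calibrated scenario.
def _example_compose_agents():
    toy_measures = {
        'student_screen_interval': None, 'student_index_probability': 0.0,
        'student_mask': False,
        'teacher_screen_interval': None, 'teacher_index_probability': 0.0,
        'teacher_mask': False,
        'family_member_screen_interval': None,
        'family_member_index_probability': 0.0, 'family_member_mask': False,
    }
    return compose_agents(toy_measures)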
def run_model(school_type, run, intermediate_contact_weight,
far_contact_weight, age_transmission_discount,
prevention_measures, school_characteristics,
agent_index_ratios, simulation_params,
contact_network_src, N_steps=500):
'''
Runs a simulation with an SEIRX_school model
(see https://pypi.org/project/scseirx/1.3.0/), given a set of parameters
which are calibrated.
Parameters:
-----------
school_type : string
School type for which the model is run. This affects the selected school
characteristics and ratio of index cases between students and teachers.
Can be "primary", "primary_dc", "lower_secondary", "lower_secondary_dc",
"upper_secondary", "secondary" or "secondary_dc".
run : integer
Consecutive number of the simulation run within the ensemble of
simulation runs with the same school type and parameter combination.
This is needed to load the correct contact network, since every run
in a given ensemble uses a different contact network that has a random
choice of household sizes and sibling connections, based on the
Austrian household statistics.
intermediate_contact_weight : float
Weight of contacts of type "intermediate" (as compared to household)
contacts. Note: This parameter is formulated in terms of a "weight",
i.e. a multiplicative factor to the intensity of the household contact
(which is 1 by default). This is different from the "probability of"
failure formulation of the factor in the Bernoulli trial notation. The
probability of failure is 1 - the contact weight.
far_contact_weight : float
Weight of contacts of type "far" (as compared to household)
contacts. Similar to intermediate_contact_weight, this parameter is
formulated as a weight.
age_transmission_discount : float
Factor by which younger children are less likely to receive and transmit
an infection. More specifically, the age_transmission_discount is the
slope of a piecewise linear function that is 1 at age 18 (and above)
and decreases for younger ages.
prevention_measures : dictionary
Dictionary listing all prevention measures in place for the given
scenario. Fields that are not specifically included in this dictionary
will revert to SEIRX_school defaults.
school_characteristics: dictionary
Dictionary holding the characteristics of each possible school type.
        Needs to include the fields "classes" and "students" (i.e. the number
        of students per class). The number of teachers is calculated
automatically from the given school type and number of classes.
agent_index_ratios : pandas DataFrame
Data frame holding the empirically observed index case ratios for
students and teachers. Has to include the columns "school_type",
"student" and "teacher".
simulation_params : dictionary
Dictionary holding simulation parameters such as "verbosity" and
"base_transmission_risk". Fields that are not included will revert back
to SEIRX_school defaults.
contact_network_src : string
Absolute or relative path pointing to the location of the contact
network used for the calibration runs. The location needs to hold the
contact networks for each school types in a sub-folder with the same
name as the school type. Networks need to be saved in networkx's .bz2
format.
N_steps : integer
Number of maximum steps per run. This is a very conservatively chosen
value that ensures that an outbreak will always terminate within the
allotted time. Most runs are terminated way earlier anyways, as soon as
the outbreak is over.
Returns
-------
model : SEIRX_school model instance holding a completed simulation run and
all associated data.
index_case : agent group from which the index case was drawn in the given
simulation run.
'''
# since we only use contacts of type "close", "intermediate" and "far" in
# this setup, we set the contact type "very far" to 0. The contact type
# "close" corresponds to household transmissions and is set to 1 (= base
# transmission risk). We therefore only calibrate the weight of the
# "intermediate" and "far" contacts with respect to household contacts
infection_risk_contact_type_weights = {
'very_far': 0,
'far': far_contact_weight,
'intermediate': intermediate_contact_weight,
'close': 1}
# get the respective parameters for the given school type
measures = prevention_measures[school_type]
characteristics = school_characteristics[school_type]
agent_index_ratio = agent_index_ratios.loc[school_type]
# create the agent dictionaries based on the given parameter values and
# prevention measures
agent_types = compose_agents(measures)
school_name = '{}_classes-{}_students-{}'.format(school_type,
characteristics['classes'], characteristics['students'])
school_src = join(contact_network_src, school_type)
# load the contact graph: since households and sibling contacts
# are random, there are a number of randomly created instances of
# calibration schools from which we can chose. We use a different
# calibration school instance for every run here
G = nx.readwrite.gpickle.read_gpickle(join(school_src,\
'{}_{}.bz2'.format(school_name, run%2000)))
# pick an index case according to the probabilities for the school type
    index_case = np.random.choice(['teacher', 'student'],
                        p=[agent_index_ratio['teacher'],
                           agent_index_ratio['student']])
# initialize the model
model = SEIRX_school(G,
simulation_params['verbosity'],
base_transmission_risk = simulation_params['base_transmission_risk'],
testing = measures['testing'],
exposure_duration = simulation_params['exposure_duration'],
time_until_symptoms = simulation_params['time_until_symptoms'],
infection_duration = simulation_params['infection_duration'],
quarantine_duration = measures['quarantine_duration'],
subclinical_modifier = simulation_params['subclinical_modifier'],
infection_risk_contact_type_weights = \
infection_risk_contact_type_weights,
K1_contact_types = measures['K1_contact_types'],
diagnostic_test_type = measures['diagnostic_test_type'],
preventive_screening_test_type = \
measures['preventive_screening_test_type'],
follow_up_testing_interval = \
measures['follow_up_testing_interval'],
liberating_testing = measures['liberating_testing'],
index_case = index_case,
agent_types = agent_types,
age_transmission_risk_discount = \
{'slope':age_transmission_discount, 'intercept':1},
age_symptom_modification = simulation_params['age_symptom_discount'],
mask_filter_efficiency = measures['mask_filter_efficiency'],
transmission_risk_ventilation_modifier = \
measures['transmission_risk_ventilation_modifier'],)
# run the model until the outbreak is over
for i in range(N_steps):
# break if first outbreak is over
if len([a for a in model.schedule.agents if \
(a.exposed == True or a.infectious == True)]) == 0:
break
model.step()
return model, index_case
def run_ensemble(N_runs, school_type, intermediate_contact_weight,
far_contact_weight, age_transmission_discount,
prevention_measures, school_characteristics,
agent_index_ratios, simulation_params,
contact_network_src, ensmbl_dst):
'''
Utility function to run an ensemble of simulations for a given school type
and parameter combination.
Parameters:
----------
N_runs : integer
Number of individual simulation runs in the ensemble.
    school_type : string
        School type for which the ensemble is run. This affects the selected
        school characteristics and ratio of index cases between students and
        teachers. Can be "primary", "primary_dc", "lower_secondary",
        "lower_secondary_dc", "upper_secondary", "secondary" or "secondary_dc".
intermediate_contact_weight : float
Weight of contacts of type "intermediate" (as compared to household)
contacts. Note: This parameter is formulated in terms of a "weight",
i.e. a multiplicative factor to the intensity of the household contact
(which is 1 by default). This is different from the "probability of"
failure formulation of the factor in the Bernoulli trial notation. The
probability of failure is 1 - the contact weight.
far_contact_weight : float
Weight of contacts of type "far" (as compared to household)
contacts. Similar to intermediate_contact_weight, this parameter is
formulated as a weight.
age_transmission_discount : float
Factor by which younger children are less likely to receive and transmit
an infection. More specifically, the age_transmission_discount is the
slope of a piecewise linear function that is 1 at age 18 (and above)
and decreases for younger ages.
prevention_measures : dictionary
Dictionary listing all prevention measures in place for the given
scenario. Fields that are not specifically included in this dictionary
will revert to SEIRX_school defaults.
school_characteristics: dictionary
Dictionary holding the characteristics of each possible school type.
        Needs to include the fields "classes" and "students" (i.e. the number
        of students per class). The number of teachers is calculated
automatically from the given school type and number of classes.
agent_index_ratios : pandas DataFrame
Data frame holding the empirically observed index case ratios for
students and teachers. Has to include the columns "school_type",
"student" and "teacher".
simulation_params : dictionary
Dictionary holding simulation parameters such as "verbosity" and
"base_transmission_risk". Fields that are not included will revert back
to SEIRX_school defaults.
contact_network_src : string
Absolute or relative path pointing to the location of the contact
network used for the calibration runs. The location needs to hold the
contact networks for each school types in a sub-folder with the same
name as the school type. Networks need to be saved in networkx's .bz2
format.
ensmbl_dst : string
Absolute or relative path pointing to the location where full ensemble
results should be saved.
Returns:
--------
ensemble_results : pandas DataFrame
Data Frame holding the observable of interest of the ensemble, namely
the number of infected students and teachers.
'''
ensemble_results = pd.DataFrame()
ensemble_runs = pd.DataFrame()
for run in range(1, N_runs + 1):
model, index_case = run_model(school_type, run,
intermediate_contact_weight,
far_contact_weight, age_transmission_discount,
prevention_measures, school_characteristics,
agent_index_ratios, simulation_params,
contact_network_src)
# collect the observables needed to calculate the difference to the
# expected values
infected_teachers = af.count_infected(model, 'teacher')
infected_students = af.count_infected(model, 'student')
# subtract the index case from the number of infected teachers/students
# to arrive at the number of follow-up cases
if index_case == 'teacher':
infected_teachers -= 1
else:
infected_students -= 1
# add run results to the ensemble results
ensemble_results = ensemble_results.append({
'infected_teachers':infected_teachers,
'infected_students':infected_students}, ignore_index=True)
# collect the statistics of the single run
data = model.datacollector.get_model_vars_dataframe()
data['run'] = run
data['step'] = range(0, len(data))
ensemble_runs = pd.concat([ensemble_runs, data])
ensemble_runs = ensemble_runs.reset_index(drop=True)
ensemble_runs.to_csv(join(ensmbl_dst,
'school_type-{}_icw-{:1.2f}_fcw-{:1.2f}_atd-{:1.4f}.csv'\
.format(school_type, intermediate_contact_weight, far_contact_weight,
age_transmission_discount)), index=False)
return ensemble_results
def evaluate_ensemble(ensemble_results, school_type, intermediate_contact_weight,
far_contact_weight, age_transmission_discount,
outbreak_size, group_distribution):
'''
Utility function to calculate the error measures (chi-squared distance and
sum of squared differences) between an ensemble of simulation runs for a
given school type and parameter combination and the empirical outbreak size
distribution and ratio of infected students vs. infected teachers.
Parameters:
-----------
ensemble_results: pandas DataFrame
Data Frame holding the observable of interest of the ensemble, namely
the number of infected students and teachers.
school_type : string
School type for which the ensemble was run. Can be "primary",
"primary_dc", "lower_secondary", "lower_secondary_dc",
"upper_secondary", "secondary" or "secondary_dc".
intermediate_contact_weight : float
Weight of contacts of type "intermediate" (as compared to household)
contacts. This parameter needs to be calibrated and is varied between
ensembles. Note: This parameter is formulated in terms of a "weight",
i.e. a multiplicative factor to the intensity of the household contact
(which is 1 by default). This is different from the "probability of"
failure formulation of the factor in the Bernoulli trial notation. The
probability of failure is 1 - the contact weight.
far_contact_weight : float
Weight of contacts of type "far" (as compared to household)
contacts. This parameter needs to be calibrated and is varied between
ensembles. Similar to intermediate_contact_weight, this parameter is
formulated as a weight.
age_transmission_discount : float
Factor by which younger children are less likely to receive and transmit
an infection. More specifically, the age_transmission_discount is the
slope of a piecewise linear function that is 1 at age 18 (and above)
and decreases for younger ages. This parameter needs to be calibrated
and is varied between ensembles.
outbreak_size : pandas DataFrame
Data frame holding the empirical outbreak size observations. The
outbreak size has to be given in the column "size", the school type in
the column "type".
group_distributions : pandas DataFrame
Data frame holding the empirical observations of the ratio of infections
in a given group (student, teacher) as compared to the overall number of
infections (students + teachers). The data frame has three columns:
"school_type", "group" and "ratio", where "group" indicates which group
(student or teacher) the number in "ratio" belongs to.
Returns:
--------
row : dictionary
Dictionary holding the school type, values for the calibration
parameters (intermediate_contact_weight, far_contact_weight,
age_transmission_discount) and the values of the respective error terms
for the outbreak size distribution and group case distribution.
'''
# calculate the differences between the expected and observed outbreak sizes
# and the distribution of cases to the two agent groups
chi2_distance_size, sum_of_squares_size = calculate_distribution_difference(\
school_type, ensemble_results, outbreak_size)
chi2_distance_distro, sum_of_squares_distro = calculate_group_case_difference(\
school_type, ensemble_results, group_distribution)
row = {
'school_type':school_type,
'intermediate_contact_weight':intermediate_contact_weight,
'far_contact_weight':far_contact_weight,
'age_transmission_discount':age_transmission_discount,
'chi2_distance_size':chi2_distance_size,
'sum_of_squares_size':sum_of_squares_size,
'chi2_distance_distro':chi2_distance_distro,
'sum_of_squares_distro':sum_of_squares_distro,
'chi2_distance_total':chi2_distance_size + sum_of_squares_distro,
}
return row
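# Hypothetical calling pattern (sketch only): the configuration objects and paths are
# assumed to come from the calibration setup; the parameter values below are placeholders.
def _example_calibration_step(prevention_measures, school_characteristics,
                              agent_index_ratios, simulation_params,
                              outbreak_sizes, group_distributions,
                              contact_network_src, ensmbl_dst):
    ensemble = run_ensemble(500, 'primary', 0.3, 0.1, 0.02,
                            prevention_measures, school_characteristics,
                            agent_index_ratios, simulation_params,
                            contact_network_src, ensmbl_dst)
    return evaluate_ensemble(ensemble, 'primary', 0.3, 0.1, 0.02,
                             outbreak_sizes, group_distributions)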
def get_ensemble_parameters_from_filename(f):
'''
Extracts the simulation parameters for an ensemble given its ensemble file
name string.
Parameters:
-----------
f : string of the form school_type-{}_icw-{}_fcw-{}_atd-{}.csv that encodes
the ensemble parameter for the school type, intermediate contact weight
(icw), far contact weight (fcw) and age transmission discount (atd).
The parameters icw, fcw and atd are floats with a precision of two
decimal places.
Returns:
--------
params : dict
Dict with the fields school_type (str), icw (float), fcw (float) and
atd (float), which hold the simulation parameters of the ensemble.
'''
school_type = f.split('_icw')[0].replace('school_type-', '')
icw = round(float(f.split('icw-')[1].split('_fcw')[0]), 2)
fcw = round(float(f.split('fcw-')[1].split('_atd')[0]), 2)
atd = round(float(f.split('atd-')[1].split('.csv')[0]), 2)
params = {'school_type':school_type, 'icw':icw, 'fcw':fcw, 'atd':atd}
return params
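# Quick illustrative check of the filename parser; the file name below is invented.
def _example_filename_parsing():
    fname = 'school_type-primary_dc_icw-0.30_fcw-0.10_atd-0.02.csv'
    # -> {'school_type': 'primary_dc', 'icw': 0.3, 'fcw': 0.1, 'atd': 0.02}
    return get_ensemble_parameters_from_filename(fname)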
def calculate_ensemble_distributions(ep, src, dst):
'''
    Calculate the number of infected students and teachers in a simulation
    (subtracting the index case) from the simulation data saved for the ensemble.
Parameters:
-----------
ep : tuple
Tuple holding the ensemble parameters (number of runs, school type,
intermediate contact weight, far contact weight, age trans. discount).
src : string
Absolute or relative path to the folder holding all ensemble data.
dst : string
Absolute or relative path to the folder in which the distribution of
infected will be saved.
'''
_, school_type, icw, fcw, atd = ep
icw = round(icw, 2)
fcw = round(fcw, 2)
ensemble_results = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 19:51:30 2018
@author: alber
"""
import os
import glob
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import numpy as np
global stemmer
import pickle
stemmer = SnowballStemmer("english")
def word_tf_idf(documento):
words = []
word_list = []
df_pattern = pd.DataFrame()
i = 0
    # Tokenize the input documents
for utterance in documento:
        # Tokenize each utterance
        w = re.findall(r'\w+', utterance.lower(), flags=re.UNICODE)  # lowercase everything first
words = w
        # Remove the stop words
words = [word for word in words if word not in stopwords.words('english')]
        # Remove hyphens and other unusual symbols
        words = [word for word in words if not word.isdigit()]  # remove numbers
        # Stemming and removal of duplicates
words = [stemmer.stem(w) for w in words]
        # Initialize the bag of words
pattern_words = words
df = pd.DataFrame(pattern_words)
df['ocurrencias'] = 1
df.columns = ['palabras', 'ocurrencias']
        df = df.groupby(['palabras'])['ocurrencias'].sum()  # grouping by word also sorts them, since the words become the index
df = | pd.DataFrame(df) | pandas.DataFrame |
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scikit_posthocs as sp
import seaborn as sns
from pingouin import kruskal
from statannot import add_stat_annotation
def parse_args(args):
parser = argparse.ArgumentParser(description="GC_content_plots")
parser.add_argument(
"file_names",
type=str,
help="Name of folder and filenames for the promoters extracted",
)
parser.add_argument(
"cv_gene_categories",
type=str,
help="Input location of coefficient of variation gene categories text file",
)
parser.add_argument(
"tau_gene_categories",
type=str,
help="Input location of tau tissue specific gene categories text file",
)
parser.add_argument(
"GC_content_tsv",
type=str,
help="Input location of promoters GC_content tsv file",
)
parser.add_argument(
"output_folder_name",
type=str,
help="Optional output folder name ending in a forward slash",
default="",
nargs="?",
)
parser.add_argument(
"palette_cv",
type=str,
help="Optional replacement colour palette for cv categories",
default=None,
nargs="?",
)
parser.add_argument(
"palette_tau",
type=str,
help="Optional replacement colour palette for tau categories",
default=None,
nargs="?",
)
return parser.parse_args(
args
) # let argparse grab args from sys.argv itself to allow for testing in module import
def rep_sample(df, col, n, random_state):
"""function to return a df with equal sample sizes
taken from here: https://stackoverflow.com/questions/39457762/python-pandas-conditionally-select-a-uniform-sample-from-a-dataframe"""
# identify number of categories
nu = df[col].nunique()
# find number of rows
# m = len(df)
    # integer divide total sample size by number of categories
mpb = n // nu
# multiply this by the number of categories and subtract from the number of samples to find the remainder
mku = n - mpb * nu
    # make an array filled with zeros corresponding to each category
fills = np.zeros(nu)
# make values in the array 1s up until the remainder
fills[:mku] = 1
# calculate sample sizes for each category
sample_sizes = (np.ones(nu) * mpb + fills).astype(int)
# group the df by categories
gb = df.groupby(col)
# define sample size function
def sample(sub_df, i):
return sub_df.sample(sample_sizes[i], random_state=random_state)
# sample = lambda sub_df, i: sub_df.sample(
# sample_sizes[i], random_state=random_state
# )
# run sample size function on each category
subs = [sample(sub_df, i) for i, (_, sub_df) in enumerate(gb)]
# return concatenated sub dfs
return pd.concat(subs)
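# Illustrative use (the column name is hypothetical):
#   rep_sample(df, "gene_type", 300, random_state=1)
# returns 300 rows split as evenly as possible across the categories of "gene_type".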
def read_GC_file(GC_content_tsv):
"""read in GC file and make extra columns"""
# read in GC content tsv
GC_content = | pd.read_table(GC_content_tsv, sep="\t", header=None) | pandas.read_table |
'''
Created on April 15, 2012
Last update on July 18, 2015
@author: <NAME>
@author: <NAME>
@author: <NAME>
'''
import pandas as pd
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
# def get(df, col):
# return(df[col])
# df['Close'] => get(df, COL.CLOSE)
# price=COL.CLOSE
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
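# out() routes every indicator through SETTINGS: with join=True (the default above)
# the result is appended as new column(s) to the input DataFrame, otherwise the
# bare Series/DataFrame is returned.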
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
    result = pd.Series(df[price].rolling(n).mean(), name=name)
return out(SETTINGS, df, result)
def EMA(df, n, price='Close'):
"""
Exponential Moving Average
"""
    result = pd.Series(df[price].ewm(span=n, min_periods=n - 1).mean(), name='EMA_' + str(n))
return out(SETTINGS, df, result)
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
# for i, idx in enumerate(df.index)
# TR=max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
        TR = max(df['High'].iloc[i + 1], df['Close'].iloc[i]) - min(df['Low'].iloc[i + 1], df['Close'].iloc[i])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
    result = pd.Series(TR_s.ewm(span=n, min_periods=n).mean(), name='ATR_' + str(n))
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
    MA = pd.Series(df[price].rolling(n).mean())
    MSD = pd.Series(df[price].rolling(n).std())
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = | pd.Series(df['Low'] - 2 * (df['High'] - PP)) | pandas.Series |
import numpy as np
import pandas as pd
from tqdm import tqdm
from collections import Counter
class AutoDatatyper(object):
def __init__(self, vector_dim=300, num_rows=1000):
self.vector_dim = vector_dim
self.num_rows = num_rows
self.decode_dict = {0: 'numeric', 1: 'character', 2: 'time', 3: 'complex'}
def create_dataset_from_data_column(self, iterable, label):
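        # Build num_rows training examples by repeatedly sampling the column data;
        # each sample is turned into a vector and all rows share the same integer label.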
iterable_str = self.__remove_na_and_stringify_iterable(iterable)
choice_range = len(iterable_str)
vector_list = []
for i in tqdm(list(range(self.num_rows))):
try:
vec = self.__get_sample_from_column_data(iterable_str, choice_range)
except ValueError:
raise ValueError('All data are NaNs.')
vector_list.append(vec)
return np.array(vector_list), np.array([label] * self.num_rows).reshape(-1, 1)
def __remove_na_and_stringify_iterable(self, iterable):
# Convert iterable to Series
if not isinstance(iterable, pd.Series):
iterable = | pd.Series(iterable) | pandas.Series |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree():
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
m = Node('m', children=[p])
p = m['p']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 1
assert 'p' in m.children
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
    assert c1.price == 100
    assert c2.price == 100
    i = 1
    s.update(dts[i], data.ix[dts[i]])
    assert c1.price == 105
    assert c2.price == 95
    i = 2
    s.update(dts[i], data.ix[dts[i]])
    assert c1.price == 100
    assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.ix[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.ix[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.ix[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.ix[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.ix[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
    assert c2 == s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
    assert c2 == s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
    assert c2 == s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
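    # price index starts at 100 and only reflects the 1$ commission:
    # 1355.50 / 1356.50 * 100 ~= 99.92628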
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
s.adjust(-100)
# trigger update
s.update(dts[0])
assert s.bankrupt
# make sure only triggered if root negative
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(-100)
s.update(dts[0])
# now make it trigger
c1.adjust(-1000)
# trigger update
s.update(dts[0])
assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
# must setup tree because if not negative root error pops up first
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(100)
s.update(dts[0])
c1.adjust(-100)
s.update(dts[1])
try:
c1.adjust(-100)
s.update(dts[1])
assert False
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_strategybase_tree_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1')
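    # with a 1$ commission the 50% target buys only 4 whole shares:
    # 4 * 100 + 1 = 401 is deducted from capital and total value drops to 999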
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
def test_strategybase_tree_decimal_position_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000.2)
s.rebalance(0.42, 'c1')
s.rebalance(0.58, 'c2')
aae(c1.value, 420.084)
aae(c2.value, 580.116)
aae(c1.value + c2.value, 1000.2)
def test_rebalance_child_not_in_tree():
s = StrategyBase('p')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
# rebalance to 0 w/ child that is not present - should ignore
s.rebalance(0, 'c2')
assert s.value == 1000
assert s.capital == 1000
assert len(s.children) == 0
def test_strategybase_tree_rebalance_to_0():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now rebalance c1
s.rebalance(0, 'c1')
assert c1.position == 0
assert c1.value == 0
assert s.capital == 1000
assert s.value == 1000
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_rebalance_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.ix[dts[i]])
m.adjust(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance child s1 - since its children are 0, no waterfall alloc
m.rebalance(0.5, 's1')
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
s1.rebalance(0.4, 'c1')
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now rebalance child s1 again and make sure c1 also gets proportional
# increase
m.rebalance(0.8, 's1')
assert s1.value == 800
aae(m.capital, 200, 1)
assert m.value == 1000
assert s1.weight == 800 / 1000
assert s2.weight == 0
assert c1.value == 300.0
assert c1.weight == 300.0 / 800
assert c1.position == 3
# now rebalance child s1 to 0 - should close out s1 and c1 as well
m.rebalance(0, 's1')
assert s1.value == 0
assert m.capital == 1000
assert m.value == 1000
assert s1.weight == 0
assert s2.weight == 0
assert c1.weight == 0
def test_strategybase_tree_rebalance_base():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# check that 2 rebalances of equal weight lead to two different allocs
# since value changes after first call
s.rebalance(0.5, 'c1')
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2')
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
# close out everything
s.flatten()
# adjust to get back to 1000
s.adjust(4)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance but set fixed base
base = s.value
s.rebalance(0.5, 'c1', base=base)
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2', base=base)
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
def test_algo_stack():
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# no run_always for now
del a1.run_always
del a2.run_always
del a3.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert not a3.called
# now test that run_always marked are run
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# a3 will have run_always
del a1.run_always
del a2.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert a3.called
def test_set_commissions():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.set_commissions(lambda x, y: 1.0)
s.setup(data)
s.update(dts[0])
s.adjust(1000)
s.allocate(500, 'c1')
assert s.capital == 599
s.set_commissions(lambda x, y: 0.0)
s.allocate(-400, 'c1')
assert s.capital == 999
def test_strategy_tree_proper_return_calcs():
s1 = StrategyBase('s1')
s2 = StrategyBase('s2')
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
dts = pd.date_range('2010-01-01', periods=3)
data = | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100) | pandas.DataFrame |
# Copyright 2019-2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
import pandas as pd
from src.al.project_service import find_project_by_name, update_project
from src.al.sr_service import query_all_srs
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import pickle
import json
from src.aws.s3 import upload_file_to_s3
modelDir = "models/"
log = logging.getLogger('loop_al')
def srs_vector(request):
try:
req = json.loads(request.body)
token = request.headers["Authorization"]
pro = find_project_by_name(req['projectName'])
# one hot encoding generate tickets vector model
if pro[0]['encoder'] == 'oneHot':
sr_text, obj_col, num_col, upload_file = [], [], [], None
for sr in query_all_srs(req['projectName']):
sr_text.append(sr['originalData'])
sr_text = | pd.DataFrame(sr_text) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 18 21:10:42 2022
@author: Nehal
"""
# -*- coding: utf-8 -*-
import streamlit as st
import pandas as pd
import altair as alt
import numpy as np
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
def app():
# @cache
@st.cache
def load_data():
#datacsv = pd.read_csv("C:/Users/Nehal/JupyterNotebooks/TheGraph_Decentraland.csv") #To load it from local
datacsv = pd.read_csv("TheGraph_Decentraland.csv") #To load it from Github
df = pd.DataFrame(datacsv)
return df
df = load_data()
df = df.rename(columns={'price_MANA': 'current_rate_pricemana'})
# only keep relevant columns
df = df [['x','y','current_rate_pricemana','date','price_USD','createdAt']]
# Create date field
df['transaction_date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data_file = 'mortality_germany.xlsx'
months = ['Jan', 'Feb', 'März', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sept', 'Okt', 'Nov', 'Dez']
days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # Ignoring that Feb has 27 days in a gap year.
def prepare_1950_data():
df = pd.read_excel(data_file, sheet_name=0)
df = df.drop(['m', 'w'], axis=1)
df.columns = ['Jahr', 'Monat', 'Tote']
return df
def prepare_2020_data():
mort_daily = | pd.read_excel(data_file, index_col='Jahr', sheet_name=1) | pandas.read_excel |
import glob
import os
import numpy as np
import pandas as pd
from xml.etree import ElementTree
from ..generic.mapping_io import read_geo_image
def list_central_wavelength_re():
""" create dataframe with metadata about RapidEye
Returns
-------
    df : dataframe
        metadata and general multispectral information about the multispectral
        instrument that is onboard RapidEye, having the following columns:
        * wavelength : central wavelength of the band
        * bandwidth : extent of the spectral sensitivity
        * bandid : number for identification in the metadata
* resolution : spatial resolution of a pixel
* name : general name of the band, if applicable
* irradiance : exo-atmospheric radiance
unit=W/m**2 μm
* detectortime : relative sampling time difference
unit=np.timedelta64(XX, 'ns')
References
----------
.. [1] Krauß, et al. "Traffic flow estimation from single
satellite images." Archives of the ISPRS, vol.XL-1/WL3, 2013.
Notes
-----
The detector arrays of the satellite are configured as follows:
.. code-block:: text
78 mm
<-------------->
+--------------+ #*# satellite
| red | |
+--------------+ | flight
| red edge | | direction
+--------------+ |
|near infrared | v
+--------------+
| |
| |
| |
| |
+--------------+ ^
| not in use | | 6.5 μm
+--------------+ v
| green |
+--------------+
| blue |
+--------------+
Example
-------
make a selection by name:
>>> boi = ['red', 'green', 'blue']
>>> re_df = list_central_wavelength_re()
>>> re_df = re_df[re_df['name'].isin(boi)]
>>> re_df
wavelength bandwidth resolution name bandid irradiance
B01 475 70 5 blue 0 1997.8
B02 555 70 5 green 1 1863.5
B03 657 55 5 red 2 1560.4
similarly you can also select by bandwidth:
>>> re_df = list_central_wavelength_re()
>>> sb_df = re_df[re_df['bandwidth']<=60]
>>> sb_df.index
Index(['B03', 'B04'], dtype='object')
"""
wavelength = {"B01": 475, "B02": 555, "B03": 657,
"B04": 710, "B05": 805,
}
bandwidth = {"B01": 70, "B02": 70, "B03": 55, "B04": 40, "B05": 90,
}
bandid = {"B01": 0, "B02": 1, "B03": 2, "B04": 3, "B05": 4,
}
resolution = {"B01": 5, "B02": 5, "B03": 5, "B04": 5, "B05": 5,
}
name = {"B01" : 'blue', "B02" : 'green',
"B03" : 'red', "B04" : 'red edge',
"B05" : 'near infrared',
}
irradiance = {"B01": 1997.8, "B02": 1863.5,
"B03": 1560.4, "B04": 1395.0,
"B05": 1124.4,
}
detectortime = {"B01": np.timedelta64(000, 'ms'),
"B02": np.timedelta64(410, 'ms'),
"B03": np.timedelta64(820, 'ms'),
"B04": np.timedelta64(2650, 'ms'),
"B05": np.timedelta64(3060, 'ms'),
} # estimates, see [1]
d = {
"wavelength": pd.Series(wavelength),
"bandwidth": pd.Series(bandwidth),
"resolution": pd.Series(resolution),
"name": pd.Series(name),
"bandid": pd.Series(bandid),
"irradiance": | pd.Series(irradiance) | pandas.Series |
import numpy as np
import pandas as pd
from reshape_tools.make_recurrent import make_recurrent
from sample_data.make_sample_data import sample_data1, sample_data2
from nptyping import NDArray
from typing import Any, Optional
def check_results(
output: NDArray[(Any, Any, Any)],
data_input: pd.DataFrame,
n_recurrent_samples: int,
partition_by: Optional[str] = None,
):
assert data_input is not None
assert output is not None
if partition_by is not None:
n_unique = len(np.unique(data_input[partition_by]))
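        # each partition contributes (n_recurrent_samples - 1) fewer windows than rows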
assert output.shape[0] == data_input.shape[0] - n_unique * (
n_recurrent_samples - 1
)
assert output.shape[1] == n_recurrent_samples
assert output.shape[2] == data_input.shape[1] - 2
else:
assert output.shape[0] == data_input.shape[0] - n_recurrent_samples + 1
assert output.shape[1] == n_recurrent_samples
assert output.shape[2] == data_input.shape[1] - 1
def test_make_recurrent_no_partitioning1():
ORDER_BY = "times"
N_RECURRENT_SAMPLES = 3
df = sample_data1()
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES)
def test_make_recurrent_no_partitioning2():
ORDER_BY = "times"
N_RECURRENT_SAMPLES = 5
df = sample_data1()
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES)
def test_make_recurrent_no_partitioning3():
ORDER_BY = "times"
N_RECURRENT_SAMPLES = 3
df = sample_data2()
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES)
def test_make_recurrent_no_partitioning4():
ORDER_BY = "times"
N_RECURRENT_SAMPLES = 5
df = sample_data2()
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES)
def test_make_recurrent_partitioning1():
ORDER_BY = "times"
PARTITION_BY = "month"
N_RECURRENT_SAMPLES = 3
df = sample_data1()
df["month"] = pd.to_datetime(df["times"]).dt.to_period("M")
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, PARTITION_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES, PARTITION_BY)
def test_make_recurrent_partitioning2():
ORDER_BY = "times"
PARTITION_BY = "month"
N_RECURRENT_SAMPLES = 4
df = sample_data1()
df[PARTITION_BY] = pd.to_datetime(df["times"]).dt.to_period("M")
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, PARTITION_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES, PARTITION_BY)
def test_make_recurrent_partitioning3():
ORDER_BY = "times"
PARTITION_BY = "month"
N_RECURRENT_SAMPLES = 5
df = sample_data1()
df[PARTITION_BY] = pd.to_datetime(df["times"]).dt.to_period("M")
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, PARTITION_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES, PARTITION_BY)
def test_make_recurrent_partitioning4():
ORDER_BY = "times"
PARTITION_BY = "day"
N_RECURRENT_SAMPLES = 3
df = sample_data2()
df[PARTITION_BY] = pd.to_datetime(df["times"]).dt.to_period("D")
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, PARTITION_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES, PARTITION_BY)
def test_make_recurrent_partitioning5():
ORDER_BY = "times"
PARTITION_BY = "day"
N_RECURRENT_SAMPLES = 4
df = sample_data2()
df[PARTITION_BY] = pd.to_datetime(df["times"]).dt.to_period("D")
arr, y = make_recurrent(df, N_RECURRENT_SAMPLES, ORDER_BY, PARTITION_BY, verbose=True, price_column="val1", time_threshold=86400)
check_results(arr, df, N_RECURRENT_SAMPLES, PARTITION_BY)
def test_make_recurrent_partitioning6():
ORDER_BY = "times"
PARTITION_BY = "day"
N_RECURRENT_SAMPLES = 5
df = sample_data2()
df[PARTITION_BY] = | pd.to_datetime(df["times"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from freezegun import freeze_time
from pandas import Timestamp
from pandas._testing import assert_frame_equal
from wetterdienst.exceptions import StartDateEndDateError
from wetterdienst.metadata.period import Period
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationPeriod,
DwdObservationResolution,
)
from wetterdienst.provider.dwd.observation.api import DwdObservationRequest
from wetterdienst.provider.dwd.observation.metadata.parameter import (
DwdObservationParameter,
)
from wetterdienst.settings import Settings
def test_dwd_observation_data_api():
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationParameter.DAILY.PRECIPITATION_HEIGHT],
resolution=Resolution.DAILY,
period=[Period.HISTORICAL, Period.RECENT],
start_date=None,
end_date=None,
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
@pytest.mark.remote
def test_dwd_observation_data_dataset():
"""Request a parameter set"""
expected = DwdObservationRequest(
parameter=["kl"],
resolution="daily",
period=["recent", "historical"],
).filter_by_station_id(station_id=(1,))
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert given == expected
expected = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
).filter_by_station_id(
station_id=(1,),
)
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert expected == given
assert expected.parameter == [
(
DwdObservationDataset.CLIMATE_SUMMARY,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
def test_dwd_observation_data_parameter():
"""Test parameter given as single value without dataset"""
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
request = DwdObservationRequest(
parameter=["climate_summary"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
def test_dwd_observation_data_parameter_dataset_pairs():
"""Test parameters given as parameter - dataset pair"""
request = DwdObservationRequest(
parameter=[("climate_summary", "climate_summary")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
request = DwdObservationRequest(
parameter=[("precipitation_height", "precipitation_more")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.PRECIPITATION_MORE.PRECIPITATION_HEIGHT,
DwdObservationDataset.PRECIPITATION_MORE,
)
]
@pytest.mark.remote
def test_dwd_observation_data_fails():
# station id
assert (
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
period=[DwdObservationPeriod.HISTORICAL],
resolution=DwdObservationResolution.DAILY,
)
.filter_by_station_id(
station_id=["test"],
)
.df.empty
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=["abc"],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_dwd_observation_data_dates():
# time input
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL],
end_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_request_period_historical():
# Historical period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
)
assert request.period == [
Period.HISTORICAL,
]
def test_request_period_historical_recent():
# Historical and recent period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(days=400),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
]
def test_request_period_historical_recent_now():
# Historical, recent and now period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
Period.NOW,
]
@freeze_time(datetime(2022, 1, 29, 1, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_recent_now():
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.RECENT, Period.NOW]
@freeze_time(datetime(2022, 1, 29, 2, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_now():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.NOW]
@freeze_time("2021-03-28T18:38:00+02:00")
def test_request_period_now_fixeddate():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert Period.NOW in request.period
def test_request_period_empty():
# No period (for example in future)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) + pd.Timedelta(days=720),
)
assert request.period == []
@pytest.mark.remote
def test_dwd_observation_data_result_missing_data():
"""Test for DataFrame having empty values for dates where the station should not
have values"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-27", # few days before official start
end_date="1934-01-04", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
# Leave only one column to potentially contain NaN which is VALUE
df = request.values.all().df.drop("quality", axis=1)
df_1933 = df[df["date"].dt.year == 1933]
df_1934 = df[df["date"].dt.year == 1934]
assert not df_1933.empty and df_1933.dropna().empty
assert not df_1934.empty and not df_1934.dropna().empty
request = DwdObservationRequest(
parameter=DwdObservationParameter.HOURLY.TEMPERATURE_AIR_MEAN_200,
resolution=DwdObservationResolution.HOURLY,
start_date="2020-06-09 12:00:00", # no data at this time (reason unknown)
end_date="2020-06-09 12:00:00",
).filter_by_station_id(
station_id=["03348"],
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["03348"]),
"dataset": pd.Categorical(["temperature_air"]),
"parameter": pd.Categorical(["temperature_air_mean_200"]),
"date": [datetime(2020, 6, 9, 12, 0, 0, tzinfo=pytz.UTC)],
"value": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
"quality": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular():
"""Test for actual values (tabular)"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 8.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 6.4], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 1008.60], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 0.5], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 0.7], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular_metric():
"""Test for actual values (tabular) in metric units"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 100.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 640.0], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 100860.0], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 273.65], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 273.84999999999997], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 273.34999999999997], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tidy_metric():
"""Test for actual values (tidy) in metric units"""
Settings.tidy = True
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=(1048,),
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"parameter",
"date",
"value",
"quality",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 28),
"dataset": | pd.Categorical(["climate_summary"] * 28) | pandas.Categorical |
import streamlit as st
import pandas as pd
import numpy as np
st.title('world gdp')
@st.cache
def load_data(path):
data = pd.read_csv(path)
data.columns = data.columns.str.lower()
return data
data = load_data("data/gdp.csv")
if st.checkbox('show raw data'):
st.write(data)
if st.checkbox('Show all gdp'):
st.subheader('all countries (too many colors, so the legend is not very useful)')
# all_data = pd.DataFrame(data.values.T, index=data.columns, columns=data["country name"].unique())[4:]
all_data = | pd.DataFrame(data.values.T, index=data.columns, columns=data.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This file combines all data loading methods into a central location.
Each type of data has a class that retrieves, processes, and checks it.
Each class has the following methods:
get - retrieves raw data from a source
adapt - transforms from the raw data to the common processed format
check - performs some format checking to see if the processed data looks right
process - does all the above
Additionally, each class then has source specific handlers.
E.g. there might be a get_url and a get_csv for a given class
and then an adapt_phe and an adapt_hps method to format the data
If pulled from an external source (e.g. url), the raw data can be stored
by setting the config['GenerateOutput']['storeInputs'] flag to be True.
These will be stored in the data/ folder
The processed output can be stored by setting the config['GenerateOutput']['storeProcessedInputs']
flag to be true, which will store the data in processed_data/
@authors: <NAME>, <NAME>
"""
import os
import sys
import yaml
import pandas as pd
import re
import requests
import io
import json
import zipfile
from http import HTTPStatus
from bs4 import BeautifulSoup
from collections import Counter
from datetime import datetime
import pickle
import h5py
import numpy as np
from covid import data as LancsData
# import model_spec
# DTYPE = model_spec.DTYPE
DTYPE = np.float64
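# Illustrative sketch only: a hypothetical ExampleData class showing the
# get/adapt/check/process convention described in the module docstring above.
# It is not part of the real pipeline; the "ExampleData"/"address" config keys
# are assumptions made purely for illustration. Like the classes below, the
# methods take config/df directly and are called through the class.
class ExampleData:
    def get(config):
        # retrieve the raw data from the configured source (a CSV is assumed here)
        return pd.read_csv(config["ExampleData"]["address"])
    def adapt(df, config):
        # transform the raw data into the common processed format
        df.columns = [c.lower() for c in df.columns]
        return df
    def check(df, config):
        # basic format check on the processed output
        if df.empty:
            raise ValueError("ExampleData produced an empty DataFrame")
        return True
    def process(config):
        # get -> adapt -> check, mirroring the source-specific classes below
        df = ExampleData.adapt(ExampleData.get(config), config)
        if ExampleData.check(df, config):
            return df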
def CovarData(config):
# Return data and covar data structs
data = {}
data['areas'] = AreaCodeData.process(config)
data['commute'] = InterLadCommuteData.process(config)
data['cases_tidy'] = CasesData.process(config)
data['cases_wide'] = data['cases_tidy'].pivot(index="lad19cd", columns="date", values="cases")
data['mobility'] = MobilityTrendData.process(config)
data['population'] = PopulationData.process(config)
data['tier'] = TierData.process(config)
# Check dimensions are consistent
check_aligned(data)
print('Data passes alignment check')
# put it into covar data form
covar_data = dict(
C=data['commute'].to_numpy().astype(DTYPE),
W=data['mobility'].to_numpy().astype(DTYPE),
N=data['population'].to_numpy().astype(DTYPE),
L=data['tier'].astype(DTYPE),
weekday=config['dates']['weekday'].astype(DTYPE),
)
return data, covar_data
class TierData:
def get(config):
"""
Retrieve an xarray DataArray of the tier data
"""
settings = config['TierData']
if settings['input'] == 'csv':
df = TierData.getCSV(settings['address'])
else:
invalidInput(settings['input'])
return df
def getCSV(file):
"""
Read TierData CSV from file
"""
return pd.read_csv(file)
def check(xarray, config):
"""
Check the data format
"""
return True
def adapt(df, config):
"""
Adapt the dataframe to the desired format.
"""
global_settings = config["Global"]
settings = config["TierData"]
# this key might not be stored in the config file
# if it's not, we need to grab it using AreaCodeData
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
# Below is assuming inference_period dates
date_low, date_high = get_date_low_high(config)
if settings['format'].lower() == 'tidy':
xarray = TierData.adapt_xarray(df, date_low, date_high, areacodes, settings)
return xarray
def adapt_xarray(tiers, date_low, date_high, lads, settings):
"""
Adapt to a filtered xarray object
"""
tiers["date"] = pd.to_datetime(tiers["date"], format="%Y-%m-%d")
tiers["code"] = merge_lad_codes(tiers["code"])
# Separate out December tiers
date_mask = tiers["date"] > np.datetime64("2020-12-02")
tiers.loc[
date_mask & (tiers["tier"] == "three"),
"tier",
] = "dec_three"
tiers.loc[
date_mask & (tiers["tier"] == "two"),
"tier",
] = "dec_two"
tiers.loc[
date_mask & (tiers["tier"] == "one"),
"tier",
] = "dec_one"
# filter down to the lads
if len(lads) > 0:
tiers = tiers[tiers.code.isin(lads)]
# add in fake LADs to ensure all lockdown tiers are present for filtering
# xarray.loc does not like it when the values aren't present
# this seems to be the cleanest way
# we drop TESTLAD after filtering down
#lockdown_states = ["two", "three", "dec_two", "dec_three"]
lockdown_states = settings['lockdown_states']
for (i, t) in enumerate(lockdown_states):
tiers.loc[tiers.shape[0]+i+1] = ['TESTLAD','TEST','LAD',date_low,t]
index = pd.MultiIndex.from_frame(tiers[["date", "code", "tier"]])
index = index.sort_values()
index = index[~index.duplicated()]
ser = pd.Series(1.0, index=index, name="value")
ser = ser[date_low : (date_high - np.timedelta64(1, "D"))]
xarr = ser.to_xarray()
xarr.data[np.isnan(xarr.data)] = 0.0
xarr_filt = xarr.loc[..., lockdown_states]
xarr_filt = xarr_filt.drop_sel({'code':'TESTLAD'})
return xarr_filt
def process(config):
if config['TierData']['format'].lower()[0:5] == 'lancs':
xarray = TierData.process_lancs(config)
else:
df = TierData.get(config)
xarray = TierData.adapt(df, config)
if TierData.check(xarray, config):
return xarray
def process_lancs(config):
global_settings = config["Global"]
settings = config["TierData"]
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
date_low, date_high = get_date_low_high(config)
if config['TierData']['format'].lower() == 'lancs_raw':
return LancsData.read_tier_restriction_data(settings['address'], areacodes, date_low, date_high)
elif config['TierData']['format'].lower() == 'lancs_tidy':
return LancsData.read_challen_tier_restriction(settings['address'], date_low, date_high, areacodes)
else:
raise NotImplementedError(f'Format type {config["TierData"]["format"]} not implemented')
class CasesData:
def get(config):
"""
Retrieve a pandas DataFrame containing the cases/line list data.
"""
settings = config['CasesData']
if settings['input'] == 'url':
df = CasesData.getURL(settings['address'],config)
elif settings['input'] == 'csv':
print('Reading case data from local CSV file at',settings['address'])
df = CasesData.getCSV(settings['address'])
elif settings['input'] == 'processed':
print('Reading case data from preprocessed CSV at', settings['address'])
df = pd.read_csv(settings['address'],index_col=0)
else:
invalidInput(settings['input'])
return df
def getURL(url, config):
"""
Placeholder, in case we wish to interface with an API.
"""
pass
def getCSV(file):
"""
Format as per linelisting
"""
columns = ["pillar", "LTLA_code", "specimen_date", "lab_report_date"]
dfs = pd.read_csv(file, chunksize=50000, iterator=True, usecols=columns)
df = pd.concat(dfs)
return df
def check(df, config):
"""
Check that data format seems correct
"""
dims = df.shape
nareas = len(config["lad19cds"])
date_low, date_high = get_date_low_high(config)
dates = pd.date_range(start=date_low,end=date_high,closed="left")
days = len(dates)
entries = days * nareas
if not (((dims[1] >= 3) & (dims[0] == entries)) | ((dims[1] == days) & (dims[0] == nareas))):
raise ValueError("Incorrect CasesData dimensions")
if 'date' in df:
_df = df
elif df.columns.name == 'date':
_df = pd.DataFrame({"date":df.columns})
else:
raise ValueError("Cannot determine date axis")
check_date_bounds(df, date_low, date_high)
check_date_format(df)
check_lad19cd_format(df)
return True
def adapt(df, config):
"""
Adapt the line listing data to the desired dataframe format.
"""
# Extract the yaml config settings
global_settings = config["Global"]
output_settings = config['GenerateOutput']
date_low, date_high = get_date_low_high(config)
settings = config["CasesData"]
pillars = settings["pillars"]
measure = settings["measure"].casefold()
output = settings["output"]
# this key might not be stored in the config file
# if it's not, we need to grab it using AreaCodeData
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
if settings['input'] == 'processed':
return df
if settings['format'].lower() == 'phe':
df = CasesData.adapt_phe(df, date_low, date_high, pillars, measure, areacodes, output)
if output_settings['storeProcessedInputs'] and output != "None":
output = format_output_filename(output,config)
df.to_csv(output, index=True)
return df
def adapt_phe(df, date_low, date_high, pillars, measure, areacodes, output):
"""
Adapt the line listing data to the desired dataframe format.
"""
# Clean missing values
df.dropna(inplace=True)
df = df.rename(columns = {"LTLA_code":"lad19cd"})
# Clean time formats
df["specimen_date"] = pd.to_datetime(df["specimen_date"], dayfirst=True)
df["lab_report_date"] = pd.to_datetime(df["lab_report_date"], dayfirst=True)
df["lad19cd"] = merge_lad_codes(df["lad19cd"])
# filters for pillars, date ranges, and areacodes if given
filters = df["pillar"].isin(pillars)
filters &= df["lad19cd"].isin(areacodes)
if measure == "specimen":
filters &= (date_low <= df["specimen_date"]) & (df["specimen_date"] < date_high)
else:
filters &= (date_low <= df["lab_report_date"]) & (df["lab_report_date"] < date_high)
df = df[filters]
df = df.drop(columns="pillar") # No longer need pillar column
# Aggregate counts
if measure == "specimen":
df = df.groupby(["specimen_date", "lad19cd"]).count()
df = df.rename(columns = {"lab_report_date":"cases"})
else:
df = df.groupby(["lab_report_date", "lad19cd"]).count()
df = df.rename(columns = {"specimen_date":"cases"})
df.index.names = ["date", "lad19cd"]
# Fill in all dates, and add 0s for empty counts
dates = pd.date_range(date_low, date_high, closed="left")
indexes = [(date, lad19) for date in dates for lad19 in areacodes]
multi_indexes = pd.MultiIndex.from_tuples(indexes, names=["date", "lad19cd"])
results = pd.DataFrame(0, index=multi_indexes, columns=["cases"])
results = results.add(df, axis=0, fill_value=0)
results = results.reset_index()
return results
def process(config):
if config["CasesData"]["format"].lower() == "lancs":
df = CasesData.process_lancs(config)
else:
df = CasesData.get(config)
df = CasesData.adapt(df, config)
if CasesData.check(df, config):
return df
def process_lancs(config):
global_settings = config["Global"]
settings = config["CasesData"]
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
inference_period = [np.datetime64(x) for x in global_settings["inference_period"]]
date_low = inference_period[0]
date_high = inference_period[1]
if ("Pillar 1" in settings["pillars"]) and ("Pillar 2" in settings["pillars"]):
pillars = "both"
elif ("Pillar 1" in settings["pillars"]):
pillars = "1"
elif ("Pillar 2" in settings["pillars"]):
pillars = "2"
dtype = settings["measure"]
df = LancsData.read_phe_cases(settings['address'], date_low, date_high,
pillar=pillars, date_type=dtype, ltlas = areacodes)
return df.reset_index().melt(['lad19cd']).rename(columns={"value":"cases"})
class MobilityTrendData:
"""
This is the transport data. The fraction of travel compared to normal levels.
"""
def get(config):
"""
Retrieve a response containing the .ods transport data as content.
"""
settings = config['MobilityTrendData']
if settings['input'] == 'url':
df = MobilityTrendData.getURL(settings['address'],config)
elif settings['input'] == 'ods':
print('Reading Transport data from local CSV file at',settings['address'])
df = MobilityTrendData.getODS(settings['address'])
elif settings['input'] == 'processed':
print('Reading Transport data from preprocessed CSV at', settings['address'])
df = pd.read_csv(settings['address'],index_col=0)
df.date = pd.to_datetime(df.date)
else:
invalidInput(settings['input'])
return df
def getURL(url,config):
"""
Utility to extract the URL to the DFT transport .ods data.
"""
settings = config['MobilityTrendData']
response = requests.get(url)
if response.status_code >= HTTPStatus.BAD_REQUEST:
raise RuntimeError(f'Request failed: {response.text}')
if settings['format'].lower() == 'dft':
print("Retrieving transport data from the DfT")
soup = BeautifulSoup(response.text, "html.parser")
href = soup.find("a", {"href":re.compile("COVID-19-transport-use-statistics.ods")}).get("href")
response = requests.get(href, timeout=5)
if response.status_code >= HTTPStatus.BAD_REQUEST:
raise RuntimeError(f'Request failed: {response.text}')
data = io.BytesIO(response.content)
# store the base data
if config['GenerateOutput']['storeInputs']:
fn = format_output_filename(config['GenerateOutput']['scrapedDataDir'] + '/MobilityTrendData_DFT.ods',config)
with open(fn,'wb') as f:
f.write(data.getvalue())
df = MobilityTrendData.getODS(data)
return df
def getODS(file):
"""
Read DfT ODS file
"""
return pd.read_excel(file, sheet_name='Transport_use_(GB)', header=6, engine='odf',
converters={"All motor vehicles2": MobilityTrendData.clean})
def check(df, config):
"""
Check that data format seems correct
Return True if passes
Error if not
"""
dims = df.shape
date_low, date_high = get_date_low_high(config)
dates = pd.date_range(start=date_low,end=date_high,closed="left")
days = len(dates)
if not ((dims[1] >= 1) & (dims[0] == days)): # number of entries
raise ValueError("Incorrect MobilityData dimensions")
# our dates are stored in the index column
# create a new df with just the dates so they can be checked
df_date = pd.DataFrame(df.index)
check_date_bounds(df_date, date_low, date_high)
check_date_format(df_date)
return True
def clean(x):
"""
Utility to clean formatting from the table where data has been revised.
"""
if type(x) == str:
return float(x.strip("r%"))/100
else:
return x
def adapt(df, config):
"""
Adapt the transport data to the desired dataframe format.
"""
global_settings = config["Global"]
output_settings = config['GenerateOutput']
date_low, date_high = get_date_low_high(config)
settings = config["MobilityTrendData"]
output = settings["output"]
if settings['input'] == 'processed':
return df
if settings['format'].lower() == 'dft':
df = MobilityTrendData.adapt_dft(df,date_low,date_high,output,config)
if output_settings['storeProcessedInputs'] and output != "None":
output = format_output_filename(output,config)
df.to_csv(output, index=True)
return df
def adapt_dft(df,date_low,date_high,output,config):
"""
Adapt the department for Transport data format to a clean Dataframe
"""
columns = [
"Date1(weekends and bank holidays in grey)",
"All motor vehicles2"
]
colnames = ["date", "percent"]
df = df[columns]
df = df.dropna(0)
df.columns = colnames
df["date"] = df["date"].apply(lambda x: pd.to_datetime(x, dayfirst=True))
mask = (df["date"] >= date_low) & (df["date"] < date_high)
df = df.loc[mask]
# change the index
df.set_index('date',inplace=True)
# set dtype
df.percent = pd.to_numeric(df.percent)
return df
def process(config):
if config['MobilityTrendData']['format'].lower() == "lancs":
df = MobilityTrendData.process_lancs(config)
else:
df = MobilityTrendData.get(config)
df = MobilityTrendData.adapt(df, config)
if MobilityTrendData.check(df, config):
return df
def process_lancs(config):
date_low, date_high = get_date_low_high(config)
return LancsData.read_traffic_flow(
config['MobilityTrendData']['address'],
date_low, date_high)
class PopulationData:
def get(config):
"""
Retrieve a response containing the population data from the ONS.
"""
settings = config['PopulationData']
if settings['input'] == 'url':
df = PopulationData.getURL(settings['address'],config)
elif settings['input'] == 'xls':
print('Reading Pop. data from local XLS file at',settings['address'])
df = PopulationData.getXLS(settings['address'])
elif settings['input'] == 'processed':
print('Reading Pop. data from preprocessed CSV at', settings['address'])
df = | pd.read_csv(settings['address'],index_col=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 10:44:47 2019
@author: tawanda
"""
import sys
import time
import pandas
import argparse
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
BASE_URL = 'https://azure.microsoft.com/en-us/pricing/calculator/?&OCID=AID2000113_SEM_iaxXnj2c&MarinID=iaxXnj2c_334925916936_azure%20calculator_e_c__67435449310_aud-390212648291:kwd-44260433768&lnkd=Google_Azure_Brand&dclid=CM_g0JGOvuMCFYrO3god5gIH1Q'
NOTHERN_EUROPE = 'europe-north'
LINUX = 'linux'
WINDOWS = 'windows'
ONLY = 'os-only'
STANDARD = 'standard'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--driver", help="path to chrome driver")
args = parser.parse_args()
if not args.driver:
print("Please enter a valid path to the chrome driver ( --driver argument )")
sys.exit(1)
browser = webdriver.Chrome(executable_path=args.driver)
browser.implicitly_wait(10)
browser.maximize_window()
try:
browser.get(BASE_URL)
products_tab_elements = browser.find_elements_by_xpath('//button[@title="Virtual Machines"]')
# print(f'length of elements = {len(elements)}')
virtual_machines_button = products_tab_elements[0]
virtual_machines_button.click()
time.sleep(5) # TODO replace with Wait func
saved_estimates_tab = browser.find_elements_by_id('estimates-picker')[0]
saved_estimates_tab.click()
input_tag = browser.find_elements_by_xpath('//input[@value ="one-year"]')[0]
input_tag.click()
# Set drop downs
region_tag = browser.find_element_by_xpath(f"//option[@value='{NOTHERN_EUROPE}']")
region_tag.click()
os_tag = browser.find_element_by_xpath(f"//option[@value='{WINDOWS}']")
os_tag.click()
type_tag = browser.find_element_by_xpath(f"//option[@value='{ONLY}']")
type_tag.click()
tier_tag = browser.find_element_by_xpath(f"//option[@value='{STANDARD}']")
tier_tag.click()
# Get all instance name values
all_instances = []
instance_list_elements = browser.find_elements_by_xpath('//*[@id="size"]/option')
for element in instance_list_elements:
element.click()
price = browser.find_elements_by_xpath("//span[@class='numeric']/span")[0].text
instance = {}
instance["Region"] = NOTHERN_EUROPE
instance["OS"] = WINDOWS
instance["Type"] = ONLY
instance["Tier"] = STANDARD
instance["Name"] = element.text.replace("Effective cost per month", "")
instance["Price"] = price
all_instances.append(instance)
prices_df = | pandas.DataFrame(all_instances) | pandas.DataFrame |
# module model
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
from sklearn import metrics, ensemble, model_selection
from sklearn.preprocessing import MinMaxScaler
from math import sqrt
import numpy as np
import datetime
from dateutil import relativedelta
import os
import io
import json
import base64
from xgboost import XGBRegressor
import tensorflow as tf
from tensorflow import keras
from statsmodels.tsa.ar_model import AutoReg
np.random.seed(42)
tf.random.set_seed(42)
def buildProphet(train_data_path, test_data_path):
print("\nBuilding Prophet model ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
y = df['RENEWABLES_PCT']
daily = y.resample('24H').mean()
dd = pd.DataFrame(daily)
dd.reset_index(inplace=True)
dd.columns = ['ds','y']
mR = Prophet(daily_seasonality=False)
mR.fit(dd)
futureR=mR.make_future_dataframe(periods=365*5)
forecastR=mR.predict(futureR)
rmse = -1.0
if len(test_data_path) > 0:
dft = pd.read_csv(test_data_path)
dft['TIMESTAMP'] = dft['TIMESTAMP'].astype('datetime64')
dft.set_index('TIMESTAMP',inplace=True)
dft_start_datetime = min(dft.index)
dft_end_datetime = max(dft.index)
actual_mean = dft['RENEWABLES_PCT'].resample('24H').mean()
predicted_mean = forecastR.loc[(forecastR['ds'] >= dft_start_datetime) & (forecastR['ds'] <= dft_end_datetime)]
predicted_mean.set_index('ds', inplace=True)
actual_mean = actual_mean[min(predicted_mean.index):]
mse = metrics.mean_squared_error(actual_mean, predicted_mean.yhat)
rmse = sqrt(mse)
print(str.format("Prophet RMSE: {:.2f}", rmse))
return rmse
def predictProphet(data_path,periods):
print("\nTraining prophet model with full dataset ...")
df = pd.read_csv(data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
y = df['RENEWABLES_PCT']
daily = y.resample('24H').mean()
dd = pd.DataFrame(daily)
dd.reset_index(inplace=True)
dd.columns = ['ds','y']
m = Prophet(daily_seasonality=False)
m.fit(dd)
future=m.make_future_dataframe(periods=periods)
print(str.format("\nPredicting with prophet model for {0} days ({1} years) ...",periods, int(periods/365)))
plt.subplot(1,1,1)
forecast=m.predict(future)
fig = m.plot(forecast,ylabel='Renewable Power Production %', xlabel='Date')
plt.suptitle('\nCA Predicted Renewable Power Production %')
#plt.title('\nCA Predicted Renewable Power Production %')
axes = plt.gca()
wd = os.path.dirname(data_path) + '/../images'
os.makedirs(wd, exist_ok=True)
fig.savefig(wd + '/prediction-prophet.png')
forecast.rename(columns={'ds':'TIMESTAMP'}, inplace=True)
forecast.set_index('TIMESTAMP',inplace=True)
prediction = pd.DataFrame({'RENEWABLES_PCT_MEAN':forecast['yhat'].resample('1Y').mean(),'RENEWABLES_PCT_LOWER':forecast['yhat_lower'].resample('1Y').mean(),'RENEWABLES_PCT_UPPER':forecast['yhat_upper'].resample('1Y').mean()})
return prediction
def rmse_calc(actual,predict):
predict = np.array(predict)
actual = np.array(actual)
distance = predict - actual
square_distance = distance ** 2
mean_square_distance = square_distance.mean()
score = np.sqrt(mean_square_distance)
return score
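# Example: rmse_calc([1, 2, 3], [2, 2, 4]) = sqrt((1 + 0 + 1) / 3) ~= 0.816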
def transformDataset(df):
# Add pct from one and two days ago as well as the differences for yesterday and yesterday-1
df['YESTERDAY'] = df['RENEWABLES_PCT'].shift()
df['YESTERDAY_DIFF'] = df['YESTERDAY'].diff()
df['YESTERDAY-1']=df['YESTERDAY'].shift()
df['YESTERDAY-1_DIFF'] = df['YESTERDAY-1'].diff()
df=df.dropna()
x_train=pd.DataFrame({'YESTERDAY':df['YESTERDAY'],'YESTERDAY_DIFF':df['YESTERDAY_DIFF'],'YESTERDAY-1':df['YESTERDAY-1'],'YESTERDAY-1_DIFF':df['YESTERDAY-1_DIFF']})
y_train = df['RENEWABLES_PCT']
return x_train,y_train
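# Worked example (toy values): for RENEWABLES_PCT = [30, 32, 31, 35] the first
# three rows are dropped for missing lags, leaving one training row with
# y = 35 and features YESTERDAY = 31, YESTERDAY_DIFF = -1, YESTERDAY-1 = 32,
# YESTERDAY-1_DIFF = 2.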
def buildRandomForestRegression(train_data_path,test_data_path):
print("\nBuilding Random Forest Regression Model ...")
print("Preparing training dataset ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
df = df.resample('1M').mean()
x_train, y_train = transformDataset(df)
print("Preparing testing dataset ...")
dt = pd.read_csv(test_data_path)
dt['TIMESTAMP'] = dt['TIMESTAMP'].astype('datetime64')
dt.set_index('TIMESTAMP',inplace=True)
x_test, y_test = transformDataset(dt)
print("Searching for best regressor ...")
model = ensemble.RandomForestRegressor()
param_search = {
'n_estimators': [100],
'max_features': ['auto'],
'max_depth': [10]
}
tscv = model_selection.TimeSeriesSplit(n_splits=2)
rmse_score = metrics.make_scorer(rmse_calc, greater_is_better = False)
gsearch = model_selection.GridSearchCV(estimator=model, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch.fit(x_train, y_train)
best_score = gsearch.best_score_
best_model = gsearch.best_estimator_
y_true = y_test.values
print("Predicting with best regressor ...")
y_pred = best_model.predict(x_test)
mse = metrics.mean_squared_error(y_true, y_pred)
rmse = sqrt(mse)
print(str.format("Random Forest Regression RMSE: {:.2f}", rmse))
return rmse
def predictRandomForestRegression(data_path,periods):
print("\nTraining Random Forest Regression model with full dataset ...")
df = pd.read_csv(data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
dfmean = df.resample('1M').mean()
dfmin = df.resample('1M').min()
dfmax = df.resample('1M').max()
x_train,y_train = transformDataset(dfmean)
xmin_train, ymin_train = transformDataset(dfmin)
xmax_train, ymax_train = transformDataset(dfmax)
model = ensemble.RandomForestRegressor()
model_min = ensemble.RandomForestRegressor()
model_max = ensemble.RandomForestRegressor()
param_search = {
'n_estimators': [100],
'max_features': ['auto'],
'max_depth': [10]
}
tscv = model_selection.TimeSeriesSplit(n_splits=2)
rmse_score = metrics.make_scorer(rmse_calc, greater_is_better = False)
gsearch = model_selection.GridSearchCV(estimator=model, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch_min = model_selection.GridSearchCV(estimator=model_min, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch_max = model_selection.GridSearchCV(estimator=model_max, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch.fit(x_train, y_train)
gsearch_min.fit(xmin_train, ymin_train)
gsearch_max.fit(xmax_train, ymax_train)
best_score = gsearch.best_score_
best_model = gsearch.best_estimator_
best_model_min = gsearch_min.best_estimator_
best_model_max = gsearch_max.best_estimator_
print("\nPredicting with Random Forest regressor ...")
prediction = pd.DataFrame(columns=['TIMESTAMP','RENEWABLES_PCT'])
l = len(x_train)
x_pred = x_train.iloc[[l-1]]
y_pred = best_model.predict(x_pred)
xmin_pred = xmin_train.iloc[[l-1]]
ymin_pred = best_model_min.predict(xmin_pred)
xmax_pred = xmax_train.iloc[[l-1]]
ymax_pred = best_model_max.predict(xmax_pred)
prediction = prediction.append({'TIMESTAMP':x_pred.index[0],'RENEWABLES_PCT_MEAN':y_pred[0],'RENEWABLES_PCT_LOWER':ymin_pred[0],'RENEWABLES_PCT_UPPER':ymax_pred[0]}, ignore_index=True)
for i in range(1,periods):
ti = prediction.iloc[i-1]['TIMESTAMP'] + pd.offsets.DateOffset(months=1)
xi_pred = pd.DataFrame({'YESTERDAY':y_pred,'YESTERDAY_DIFF':y_pred-x_pred['YESTERDAY'],'YESTERDAY-1':x_pred['YESTERDAY'],'YESTERDAY-1_DIFF':x_pred['YESTERDAY_DIFF']})
yi_pred = best_model.predict(xi_pred)
xmini_pred = pd.DataFrame({'YESTERDAY':ymin_pred,'YESTERDAY_DIFF':ymin_pred-xmin_pred['YESTERDAY'],'YESTERDAY-1':xmin_pred['YESTERDAY'],'YESTERDAY-1_DIFF':xmin_pred['YESTERDAY_DIFF']})
ymini_pred = best_model.predict(xmini_pred)
xmaxi_pred = pd.DataFrame({'YESTERDAY':ymax_pred,'YESTERDAY_DIFF':ymax_pred-xmax_pred['YESTERDAY'],'YESTERDAY-1':xmax_pred['YESTERDAY'],'YESTERDAY-1_DIFF':xmax_pred['YESTERDAY_DIFF']})
ymaxi_pred = best_model.predict(xmaxi_pred)
prediction = prediction.append({'TIMESTAMP':ti,'RENEWABLES_PCT_MEAN':yi_pred[0],'RENEWABLES_PCT_LOWER':ymini_pred[0],'RENEWABLES_PCT_UPPER':ymaxi_pred[0]}, ignore_index=True)
x_pred = xi_pred
y_pred = yi_pred
xmin_pred = xmini_pred
ymin_pred = ymini_pred
xmax_pred = xmaxi_pred
ymax_pred = ymaxi_pred
prediction.set_index('TIMESTAMP',inplace=True)
prediction = prediction.resample('1Y').mean()
p = prediction.plot()
p.set_title('CA Predicted Renewables % by Random Forest Regression')
p.set_ylabel('Renewables %')
wd = os.path.dirname(data_path) + '/../images'
os.makedirs(wd, exist_ok=True)
plt.savefig(wd + '/prediction-randomforest.png')
return prediction
# transform a time series dataset into a supervised learning dataset
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = | pd.DataFrame(data) | pandas.DataFrame |
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
from autots.models.model_list import no_shared
from autots.tools.impute import fill_median
horizontal_aliases = ['horizontal', 'probabilistic']
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def mosaic_or_horizontal(all_series: dict):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
"""
first_value = all_series[next(iter(all_series))]
if isinstance(first_value, dict):
return "mosaic"
else:
return "horizontal"
def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
model_id (str): name of model to find series for
series_id (str): name of series to find models for
Returns:
list
"""
if model_id is None and series_id is None:
raise ValueError(
"either series_id or model_id must be specified in parse_horizontal."
)
if mosaic_or_horizontal(all_series) == 'mosaic':
if model_id is not None:
return [ser for ser, mod in all_series.items() if model_id in mod.values()]
else:
return list(set(all_series[series_id].values()))
else:
if model_id is not None:
return [ser for ser, mod in all_series.items() if mod == model_id]
else:
# list(set([mod for ser, mod in all_series.items() if ser == series_id]))
return [all_series[series_id]]
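# Example with made-up ids: given all_series = {"s1": "m1", "s2": "m2", "s3": "m1"},
# parse_horizontal(all_series, model_id="m1") returns ["s1", "s3"], and
# parse_horizontal(all_series, series_id="s2") returns ["m2"].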
def BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime: dict,
prediction_interval: float = 0.9,
):
"""Generate mean forecast for ensemble of models.
Args:
ensemble_params (dict): BestN ensemble param dict
should have "model_weights": {model_id: weight} where 1 is default weight per model
forecasts (dict): {forecast_id: forecast dataframe} for all models
same for lower_forecasts, upper_forecasts
forecast_runtime (dict): dictionary of {forecast_id: timedelta of runtime}
prediction_interval (float): metadata on interval
"""
startTime = datetime.datetime.now()
forecast_keys = list(forecasts.keys())
model_weights = dict(ensemble_params.get("model_weights", {}))
ensemble_params['model_weights'] = model_weights
ensemble_params['models'] = {
k: v
for k, v in dict(ensemble_params.get('models')).items()
if k in forecast_keys
}
model_count = len(forecast_keys)
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
model_divisor = 0
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
current_weight = float(model_weights.get(idx, 1))
ens_df = ens_df + (x * current_weight)
# also .get(idx, 0)
ens_df_lower = ens_df_lower + (lower_forecasts[idx] * current_weight)
ens_df_upper = ens_df_upper + (upper_forecasts[idx] * current_weight)
model_divisor = model_divisor + current_weight
ens_df = ens_df / model_divisor
ens_df_lower = ens_df_lower / model_divisor
ens_df_upper = ens_df_upper / model_divisor
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
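# Worked example of the weighting above (toy numbers, two models "a" and "b"):
# with model_weights = {"a": 2, "b": 1} and point forecasts of 10.0 and 13.0 for
# the same series and step, the ensemble value is (2 * 10.0 + 1 * 13.0) / 3 = 11.0;
# the lower and upper forecasts are combined with the same weights.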
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
X = fill_median(X)
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
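# Usage sketch (made-up series and model ids): with df_train columns
# ["EXUSEU", "MCOILWTICO", "CSUSHPISA", "T10Y2Y"] and
# known = {"EXUSEU": "m1", "MCOILWTICO": "m2", "CSUSHPISA": "m1"},
# horizontal_classifier(df_train, known) returns a dict covering all four columns,
# assigning "T10Y2Y" to whichever known model its summary statistics most resemble
# under the GaussianNB fit above, while the known assignments are kept as given.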
def mosaic_classifier(df_train, known):
"""CLassify unknown series with the appropriate model for mosaic ensembles."""
known.index.name = "forecast_period"
upload = pd.melt(
known,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
upload['forecast_period'] = upload['forecast_period'].astype(int)
missing_cols = df_train.columns[
~df_train.columns.isin(upload['series_id'].unique())
]
if not missing_cols.empty:
forecast_p = np.arange(upload['forecast_period'].max() + 1)
p_full = np.tile(forecast_p, len(missing_cols))
missing_rows = pd.DataFrame(
{
'forecast_period': p_full,
'series_id': np.repeat(missing_cols.values, len(forecast_p)),
'model_id': np.nan,
},
index=None if len(p_full) > 1 else [0],
)
upload = pd.concat([upload, missing_rows])
X = fill_median(
(summarize_series(df_train).transpose()).merge(
upload, left_index=True, right_on="series_id"
)
)
X.set_index("series_id", inplace=True) # .drop(columns=['series_id'], inplace=True)
to_predict = X[X['model_id'].isna()].drop(columns=['model_id'])
X = X[~X['model_id'].isna()]
Y = X['model_id']
Xf = X.drop(columns=['model_id'])
# from sklearn.linear_model import RidgeClassifier
# from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(Xf, Y)
predicted = clf.predict(to_predict)
result = pd.concat(
[to_predict.reset_index(drop=False), pd.Series(predicted, name="model_id")],
axis=1,
)
cols_needed = ['model_id', 'series_id', 'forecast_period']
final = pd.concat(
[X.reset_index(drop=False)[cols_needed], result[cols_needed]], sort=True, axis=0
)
final['forecast_period'] = final['forecast_period'].astype(str)
final = final.pivot(values="model_id", columns="series_id", index="forecast_period")
try:
final = final[df_train.columns]
if final.isna().to_numpy().sum() > 0:
raise KeyError("NaN in mosaic generalization")
except KeyError as e:
raise ValueError(
f"mosaic_classifier failed to generalize for all columns: {repr(e)}"
)
return final
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
available_models (dict): list of models actually available
full_models (dict): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
# here split for mosaic or horizontal
if mosaic_or_horizontal(known_matches) == "mosaic":
# make it a dataframe
mosaicy = pd.DataFrame.from_dict(known_matches)
# remove unavailable models
mosaicy = pd.DataFrame(mosaicy[mosaicy.isin(available_models)])
# so we can fill some missing by just using a forward fill, should be good enough
mosaicy.fillna(method='ffill', limit=5, inplace=True)
mosaicy.fillna(method='bfill', limit=5, inplace=True)
if mosaicy.isna().any().any() or mosaicy.shape[1] != df_train.shape[1]:
if full_models is not None:
k2 = pd.DataFrame(mosaicy[mosaicy.isin(full_models)])
else:
k2 = mosaicy.copy()
final = mosaic_classifier(df_train, known=k2)
return final.to_dict()
else:
return mosaicy.to_dict()
else:
# remove any unavailable models
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
# test if generalization is needed
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
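# Sketch of the horizontal case (made-up ids): if df_train has columns
# ["s1", "s2", "s3"], known_matches = {"s1": "m1", "s2": "m9"},
# available_models = ["m1", "m2"] and full_models = ["m1"], then "s2" loses its
# unavailable model "m9" and both "s2" and "s3" are assigned by
# horizontal_classifier from the full_models subset, while "s1": "m1" is kept.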
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
startTime = datetime.datetime.now()
# this is meant to fill in any failures
available_models = [mod for mod, fcs in forecasts.items() if fcs.shape[0] > 0]
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
print("No full models available for horizontal generalization!")
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in all_series.items():
try:
c_fore = forecasts[mod_id][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
print(f"Horizontal ensemble unable to add model {mod_id} {repr(e)}")
# upper
c_fore = upper_forecasts[mod_id][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[mod_id][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
# make sure columns align to original
forecast_df = forecast_df.reindex(columns=org_idx)
u_forecast_df = u_forecast_df.reindex(columns=org_idx)
l_forecast_df = l_forecast_df.reindex(columns=org_idx)
# combine runtimes
try:
ens_runtime = sum(list(forecasts_runtime.values()), datetime.timedelta())
except Exception:
ens_runtime = datetime.timedelta(0)
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for per_series per distance ensembling."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
id_list = list(ensemble_params['models'].keys())
mod_dic = {x: idx for idx, x in enumerate(forecasts_list) if x in id_list}
forecast_length = forecasts[0].shape[0]
dist_n = int(np.ceil(ensemble_params['dis_frac'] * forecast_length))
dist_last = forecast_length - dist_n
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series1'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
forecast_df2, u_forecast_df2, l_forecast_df2 = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series2'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df2 = pd.concat([forecast_df2, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df2 = pd.concat([u_forecast_df2, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df2 = pd.concat([l_forecast_df2, c_fore], axis=1)
forecast_df = pd.concat(
[forecast_df.head(dist_n), forecast_df2.tail(dist_last)], axis=0
)
u_forecast_df = pd.concat(
[u_forecast_df.head(dist_n), u_forecast_df2.tail(dist_last)], axis=0
)
l_forecast_df = pd.concat(
[l_forecast_df.head(dist_n), l_forecast_df2.tail(dist_last)], axis=0
)
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in list(mod_dic.values()):
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def EnsembleForecast(
ensemble_str,
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Return PredictionObject for given ensemble method."""
ens_model_name = ensemble_params['model_name'].lower().strip()
s3list = ['best3', 'best3horizontal', 'bestn']
if ens_model_name in s3list:
ens_forecast = BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
elif ens_model_name == 'dist':
ens_forecast = DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
elif ens_model_name in horizontal_aliases:
ens_forecast = HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
elif ens_model_name == "mosaic":
ens_forecast = MosaicEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
elif ens_model_name == 'hdist':
ens_forecast = HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
else:
raise ValueError("Ensemble model type not recognized.")
def _generate_distance_ensemble(dis_frac, forecast_length, initial_results):
"""Constructs a distance ensemble dictionary."""
first_bit = int(np.ceil(forecast_length * dis_frac))
last_bit = int(np.floor(forecast_length * (1 - dis_frac)))
not_ens_list = initial_results.model_results[
initial_results.model_results['Ensemble'] == 0
]['ID'].tolist()
ens_per_ts = initial_results.per_timestamp_smape[
initial_results.per_timestamp_smape.index.isin(not_ens_list)
]
first_model = ens_per_ts.iloc[:, 0:first_bit].mean(axis=1).idxmin()
last_model = (
ens_per_ts.iloc[:, first_bit : (last_bit + first_bit)].mean(axis=1).idxmin()
)
ensemble_models = {}
best3 = (
initial_results.model_results[
initial_results.model_results['ID'].isin([first_model, last_model])
]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
ensemble_models = best3.to_dict(orient='index')
return {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'Dist',
'model_count': 2,
'model_metric': 'smape',
'models': ensemble_models,
'dis_frac': dis_frac,
'FirstModel': first_model,
'SecondModel': last_model,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
def _generate_bestn_dict(
best,
model_name: str = 'BestN',
model_metric: str = "best_score",
model_weights: dict = None,
):
ensemble_models = best.to_dict(orient='index')
model_parms = {
'model_name': model_name,
'model_count': best.shape[0],
'model_metric': model_metric,
'models': ensemble_models,
}
if model_weights is not None:
model_parms['model_weights'] = model_weights
return {
'Model': 'Ensemble',
'ModelParameters': json.dumps(model_parms),
'TransformationParameters': '{}',
'Ensemble': 1,
}
def EnsembleTemplateGenerator(
initial_results,
forecast_length: int = 14,
ensemble: str = "simple",
score_per_series=None,
):
"""Generate class 1 (non-horizontal) ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
ens_temp = initial_results.model_results.drop_duplicates(subset='ID')
# filter out horizontal ensembles
ens_temp = ens_temp[ens_temp['Ensemble'] <= 1]
if 'simple' in ensemble:
# best 3, all can be of same model type
best3nonunique = ens_temp.nsmallest(3, columns=['Score']).set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = best3nonunique.shape[0]
if n_models == 3:
best3nu_params = pd.DataFrame(
_generate_bestn_dict(
best3nonunique, model_name='BestN', model_metric="best_score"
),
index=[0],
)
ensemble_templates = pd.concat([ensemble_templates, best3nu_params], axis=0)
# best 3, by SMAPE, RMSE, SPL
bestsmape = ens_temp.nsmallest(1, columns=['smape_weighted'])
bestrmse = ens_temp.nsmallest(2, columns=['rmse_weighted'])
bestmae = ens_temp.nsmallest(3, columns=['spl_weighted'])
best3metric = pd.concat([bestsmape, bestrmse, bestmae], axis=0)
best3metric = (
best3metric.drop_duplicates()
.head(3)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
n_models = best3metric.shape[0]
if n_models == 3:
best3m_params = pd.DataFrame(
_generate_bestn_dict(
best3metric, model_name='BestN', model_metric="mixed_metric"
),
index=[0],
)
ensemble_templates = pd.concat([ensemble_templates, best3m_params], axis=0)
# best 3, all must be of different model types
ens_temp = (
ens_temp.sort_values('Score', ascending=True, na_position='last')
.groupby('Model')
.head(1)
.reset_index(drop=True)
)
best3unique = ens_temp.nsmallest(3, columns=['Score']).set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = best3unique.shape[0]
if n_models == 3:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
best3unique, model_name='BestN', model_metric="best_score_unique"
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
if 'distance' in ensemble:
dis_frac = 0.2
distance_params = pd.DataFrame(
_generate_distance_ensemble(dis_frac, forecast_length, initial_results),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, distance_params], axis=0, ignore_index=True
)
dis_frac = 0.5
distance_params2 = pd.DataFrame(
_generate_distance_ensemble(dis_frac, forecast_length, initial_results),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, distance_params2], axis=0, ignore_index=True
)
# in previous versions per_series metrics were only captured if 'horizontal' was passed
if 'simple' in ensemble:
if score_per_series is None:
per_series = initial_results.per_series_mae
else:
per_series = score_per_series
per_series = per_series[per_series.index.isin(ens_temp['ID'].tolist())]
# make it ranking based! Need bigger=better for weighting
per_series_ranked = per_series.rank(ascending=False)
# choose best n based on score per series
n = 3
chosen_ones = per_series_ranked.sum(axis=1).nlargest(n)
bestn = ens_temp[ens_temp['ID'].isin(chosen_ones.index.tolist())].set_index(
"ID"
)[['Model', 'ModelParameters', 'TransformationParameters']]
n_models = bestn.shape[0]
if n_models == n:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric="bestn_horizontal",
model_weights=chosen_ones.to_dict(),
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
# cluster and then make best model per cluster
if per_series.shape[1] > 4:
try:
from sklearn.cluster import AgglomerativeClustering
max_clusters = 8
n_clusters = round(per_series.shape[1] / 3)
n_clusters = max_clusters if n_clusters > max_clusters else n_clusters
X = per_series_ranked.transpose()
clstr = AgglomerativeClustering(n_clusters=n_clusters).fit(X)
series_labels = clstr.labels_
for cluster in np.unique(series_labels).tolist():
current_ps = per_series_ranked[
per_series_ranked.columns[series_labels == cluster]
]
n = 3
chosen_ones = current_ps.sum(axis=1).nlargest(n)
bestn = ens_temp[
ens_temp['ID'].isin(chosen_ones.index.tolist())
].set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = bestn.shape[0]
if n_models == n:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric=f"cluster_{cluster}",
model_weights=chosen_ones.to_dict(),
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params],
axis=0,
ignore_index=True,
)
except Exception as e:
print(f"cluster-based simple ensemble failed with {repr(e)}")
mods = pd.Series()
per_series_des = per_series.copy()
n_models = 3
# choose best per series, remove those series, then choose next best
for x in range(n_models):
n_dep = 5 if x < 2 else 10
n_dep = (
n_dep if per_series_des.shape[0] > n_dep else per_series_des.shape[0]
)
models_pos = []
tr_df = pd.DataFrame()
for _ in range(n_dep):
cr_df = pd.DataFrame(per_series_des.idxmin()).transpose()
tr_df = pd.concat([tr_df, cr_df], axis=0)
models_pos.extend(per_series_des.idxmin().tolist())
per_series_des[per_series_des == per_series_des.min()] = np.nan
cur_mods = pd.Series(models_pos).value_counts()
cur_mods = cur_mods.sort_values(ascending=False).head(1)
mods = mods.combine(cur_mods, max, fill_value=0)
rm_cols = tr_df[tr_df.isin(mods.index.tolist())]
rm_cols = rm_cols.dropna(how='all', axis=1).columns
per_series_des = per_series.copy().drop(mods.index, axis=0)
per_series_des = per_series_des.drop(rm_cols, axis=1)
if per_series_des.shape[1] == 0:
per_series_des = per_series.copy().drop(mods.index, axis=0)
best3 = (
initial_results.model_results[
initial_results.model_results['ID'].isin(mods.index.tolist())
]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
best3_params = pd.DataFrame(
_generate_bestn_dict(best3, model_name='BestN', model_metric="horizontal"),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3_params], axis=0, ignore_index=True
)
if 'subsample' in ensemble:
try:
import random
if score_per_series is None:
per_series = initial_results.per_series_mae
else:
per_series = score_per_series
per_series = per_series[per_series.index.isin(ens_temp['ID'].tolist())]
# make it ranking based! Need bigger=better for weighting
per_series_ranked = per_series.rank(ascending=False)
# subsample and then make best model per group
num_series = per_series.shape[1]
n_samples = num_series * 2
max_deep_ensembles = 100
n_samples = (
n_samples if n_samples < max_deep_ensembles else max_deep_ensembles
)
col_min = 1 if num_series < 3 else 2
col_max = round(num_series / 2)
col_max = num_series if col_max > num_series else col_max
for samp in range(n_samples):
n_cols = random.randint(col_min, col_max)
current_ps = per_series_ranked.sample(n=n_cols, axis=1)
n_largest = random.randint(9, 16)
n_sample = random.randint(2, 5)
# randomly choose one of best models
chosen_ones = current_ps.sum(axis=1).nlargest(n_largest)
n_sample = (
n_sample
if n_sample < chosen_ones.shape[0]
else chosen_ones.shape[0]
)
chosen_ones = chosen_ones.sample(n_sample).sort_values(ascending=False)
bestn = ens_temp[
ens_temp['ID'].isin(chosen_ones.index.tolist())
].set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
model_weights = random.choice([chosen_ones.to_dict(), None])
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric=f"subsample_{samp}",
model_weights=model_weights,
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
except Exception as e:
print(f"subsample ensembling failed with error: {repr(e)}")
return ensemble_templates
def HorizontalTemplateGenerator(
per_series,
model_results,
forecast_length: int = 14,
ensemble: str = "horizontal",
subset_flag: bool = True,
per_series2=None,
):
"""Generate horizontal ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
ensy = ['horizontal', 'probabilistic', 'hdist']
if any(x in ensemble for x in ensy):
if ('horizontal-max' in ensemble) or ('probabilistic-max' in ensemble):
mods_per_series = per_series.idxmin()
mods = mods_per_series.unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'Horizontal' if 'horizontal' in ensemble else 'Probabilistic'
metric = 'Score-max' if 'horizontal' in ensemble else 'SPL'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'model_metric': metric,
'models': best5.to_dict(orient='index'),
'series': mods_per_series.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
if 'hdist' in ensemble and not subset_flag:
mods_per_series = per_series.idxmin()
mods_per_series2 = per_series2.idxmin()
mods = pd.concat([mods_per_series, mods_per_series2]).unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'hdist'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'models': best5.to_dict(orient='index'),
'dis_frac': 0.3,
'series1': mods_per_series.to_dict(),
'series2': mods_per_series2.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
if ('horizontal' in ensemble) or ('probabilistic' in ensemble):
# first generate lists of models by ID that are in shared and no_shared
no_shared_select = model_results['Model'].isin(no_shared)
shared_mod_lst = model_results[~no_shared_select]['ID'].tolist()
no_shared_mod_lst = model_results[no_shared_select]['ID'].tolist()
lowest_score_mod = [
model_results.iloc[model_results['Score'].idxmin()]['ID']
]
per_series[per_series.index.isin(shared_mod_lst)]
# remove those where idxmin is in no_shared
shared_maxes = per_series.idxmin().isin(shared_mod_lst)
shr_mx_cols = shared_maxes[shared_maxes].index
per_series_shareds = per_series.filter(shr_mx_cols, axis=1)
# select best n shared models (NEEDS improvement)
n_md = 5
use_shared_lst = (
per_series_shareds.median(axis=1).nsmallest(n_md).index.tolist()
)
# combine all of the above as allowed mods
allowed_list = no_shared_mod_lst + lowest_score_mod + use_shared_lst
per_series_filter = per_series[per_series.index.isin(allowed_list)]
# first select a few of the best shared models
# Option A: Best overall per model type (by different metrics?)
# Option B: Best per different clusters...
# Rank position in score for EACH series
# Lowest median ranking
# Lowest Quartile 1 of rankings
# Normalize and then take Min, Median, or IQ1
# then choose min from series of these + no_shared
# make sure no models are included that don't match to any series
# ENSEMBLE and NO_SHARED (it could be or it could not be)
# need to TEST cases where all columns are either shared or no_shared!
# concern: choose lots of models, slower to run initial
mods_per_series = per_series_filter.idxmin()
mods = mods_per_series.unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'Horizontal' if 'horizontal' in ensemble else 'Probabilistic'
metric = 'Score' if 'horizontal' in ensemble else 'SPL'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'model_metric': metric,
'models': best5.to_dict(orient='index'),
'series': mods_per_series.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
if ('horizontal-min' in ensemble) or ('probabilistic-min' in ensemble):
mods = pd.Series()
per_series_des = per_series.copy()
n_models = 15
# choose best per series, remove those series, then choose next best
for x in range(n_models):
n_dep = x + 1
n_dep = (
n_dep
if per_series_des.shape[0] > n_dep
else per_series_des.shape[0]
)
models_pos = []
tr_df = pd.DataFrame()
for _ in range(n_dep):
cr_df = pd.DataFrame(per_series_des.idxmin()).transpose()
tr_df = pd.concat([tr_df, cr_df], axis=0)
models_pos.extend(per_series_des.idxmin().tolist())
per_series_des[per_series_des == per_series_des.min()] = np.nan
                cur_mods = pd.Series(models_pos)
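# --- Illustrative sketch (not part of the original file above) ---------------
# The blocks above keep turning a per-series error table into a weighted "BestN"
# ensemble: rank models per series with bigger = better, sum the ranks across
# series, keep the n models with the largest totals, and pass those totals along
# as model_weights. The toy table below is made up purely to show that step.
import numpy as np
import pandas as pd

per_series_demo = pd.DataFrame(
    np.random.default_rng(0).random((6, 4)),            # 6 models x 4 series, lower error = better
    index=[f"model_{i}" for i in range(6)],
    columns=[f"series_{j}" for j in range(4)],
)
ranked_demo = per_series_demo.rank(ascending=False)     # lowest error gets the biggest rank
chosen_demo = ranked_demo.sum(axis=1).nlargest(3)       # best 3 models by summed rank
weights_demo = chosen_demo.to_dict()                    # used as model_weights in the code above
print(weights_demo)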
import re
from datetime import datetime, timezone
import pandas as pd
import numpy as np
"""
Created on Thu Feb 27 14:05:58 2020
@author: <NAME>
Partly adapted from Meng Cai
A few functions for processing text data.
"""
def import_comment(file, text_column):
"""
Load a csv file with survey comments,
remove rows with empty text or illegible words or N/A answer.
Input:
file -- the name of a csv file;
text_column -- the name of the target text column.
Output:
a Pandas dataframe.
"""
    raw = pd.read_csv(file, encoding="ISO-8859-1")
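# --- Illustrative sketch (not part of the original file above) ---------------
# import_comment() is cut off right after the read; a minimal version of the
# row-filtering its docstring promises could look like this. The set of
# placeholder strings treated as empty/illegible/N-A answers is an assumption.
def _drop_unusable_rows(raw, text_column):
    out = raw.copy()
    out[text_column] = out[text_column].astype(str).str.strip()
    bad_values = {"", "nan", "n/a", "na", "none", "-"}   # assumed placeholder answers
    return out[~out[text_column].str.lower().isin(bad_values)].reset_index(drop=True)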
import gzip
import json
from enum import Enum
from typing import Optional, Dict, Union, Iterator, Set, List
from problog.logic import Clause as ProblogClause, Term as ProblogTerm
from pylo.language.lp import Clause as PyloClause, Literal as PyloLiteral
from pylo.language.lp import global_context as pylo_global_context
import pandas as pd
from kbc_pul.amie.amie_rule_string_to_problog_clause_conversion import DatalogAMIERuleConvertor
from kbc_pul.prolog_utils.problog_to_pylo_conversion import convert_clause
from kbc_pul.confidence_naming import ConfidenceEnum
class AmieOutputKeyEnum(Enum):
RULE = "Rule"
HEAD_COVERAGE = 'Head Coverage' # rule support / head relation size in observed KB
STD_CONF = 'Std Confidence'
PCA_CONF = 'PCA Confidence'
N_POSITIVE_EXAMPLES = 'Positive Examples' # rule support, i.e. nb of predictions supported by the rule
BODY_SIZE = 'Body size'
PCA_BODY_SIZE = 'PCA Body size'
FUNCTIONAL_VARIABLE = 'Functional variable'
columns_header_without_amie = ["Rule", 'Nb supported predictions', 'Body size'] + [
conf.get_name()
for conf in ConfidenceEnum
]
class RuleWrapper:
def __init__(self, rule: PyloClause, o_amie_dict: Optional[Dict] = None):
"""
:param rule:
:param o_amie_dict:
"""
self.rule: PyloClause = rule
self.amie_dict: Optional[Dict] = o_amie_dict
# BODY support
# # predictions
self.o_n_predictions: Optional[int] = None # a.k.a. the body support
# RULE support
# # predictions in the observed KB
self.o_n_supported_predictions: Optional[int] = None # a.k.a the 'Positive predictions' from AMIE
self.o_std_confidence: Optional[float] = None
self.o_pca_confidence_subject_to_object: Optional[float] = None
self.o_pca_confidence_object_to_subject: Optional[float] = None
self.o_c_weighted_std_conf: Optional[float] = None
self.o_relative_pu_confidence_unbiased: Optional[float] = None
self.o_relative_pu_confidence_pca_subject_to_object: Optional[float] = None
self.o_relative_pu_confidence_pca_object_to_subject: Optional[float] = None
# self.o_absolute_pu_confidence: Optional[float] = None
self.o_true_confidence: Optional[float] = None
self.o_true_pca_confidence_subject_to_object: Optional[float] = None
self.o_true_pca_confidence_object_to_subject: Optional[float] = None
def get_value(self, conf_name: ConfidenceEnum):
if conf_name is ConfidenceEnum.CWA_CONF:
return self.o_std_confidence
elif conf_name is ConfidenceEnum.ICW_CONF:
return self.o_c_weighted_std_conf
elif conf_name is ConfidenceEnum.PCA_CONF_S_TO_O:
return self.o_pca_confidence_subject_to_object
elif conf_name is ConfidenceEnum.PCA_CONF_O_TO_S:
return self.o_pca_confidence_object_to_subject
elif conf_name is ConfidenceEnum.IPW_CONF:
return self.o_relative_pu_confidence_unbiased
elif conf_name is ConfidenceEnum.IPW_PCA_CONF_S_TO_O:
return self.o_relative_pu_confidence_pca_subject_to_object
elif conf_name is ConfidenceEnum.IPW_PCA_CONF_O_TO_S:
return self.o_relative_pu_confidence_pca_object_to_subject
elif conf_name is ConfidenceEnum.TRUE_CONF:
return self.o_true_confidence
elif conf_name is ConfidenceEnum.TRUE_CONF_BIAS_YS_ZERO_S_TO_O:
return self.o_true_pca_confidence_subject_to_object
elif conf_name is ConfidenceEnum.TRUE_CONF_BIAS_YS_ZERO_O_TO_S:
return self.o_true_pca_confidence_object_to_subject
else:
raise Exception(f"Could not find RuleWrapper instance variable corresponding to {conf_name}")
@staticmethod
def create_rule_wrapper(
rule: PyloClause,
amie_dict: Optional[Dict],
o_n_predictions: Optional[int],
o_n_supported_predictions: Optional[int],
o_std_confidence: Optional[float],
o_pca_confidence_subject_to_object: Optional[float],
o_pca_confidence_object_to_subject: Optional[float],
o_c_weighted_std_conf: Optional[float],
o_relative_pu_confidence_unbiased: Optional[float],
o_relative_pu_confidence_pca_subject_to_object: Optional[float],
o_relative_pu_confidence_pca_object_to_subject: Optional[float],
o_true_confidence: Optional[float],
o_true_pca_confidence_subject_to_object: Optional[float],
o_true_pca_confidence_object_to_subject: Optional[float],
) -> 'RuleWrapper':
new_rule_wrapper: RuleWrapper = RuleWrapper(rule=rule, o_amie_dict=amie_dict)
new_rule_wrapper.o_n_predictions = o_n_predictions
new_rule_wrapper.o_n_supported_predictions = o_n_supported_predictions
new_rule_wrapper.o_std_confidence = o_std_confidence
new_rule_wrapper.o_pca_confidence_subject_to_object = o_pca_confidence_subject_to_object
new_rule_wrapper.o_pca_confidence_object_to_subject = o_pca_confidence_object_to_subject
new_rule_wrapper.o_c_weighted_std_conf = o_c_weighted_std_conf
new_rule_wrapper.o_relative_pu_confidence_unbiased = o_relative_pu_confidence_unbiased
new_rule_wrapper.o_relative_pu_confidence_pca_subject_to_object = o_relative_pu_confidence_pca_subject_to_object
new_rule_wrapper.o_relative_pu_confidence_pca_object_to_subject = o_relative_pu_confidence_pca_object_to_subject
new_rule_wrapper.o_true_confidence = o_true_confidence
new_rule_wrapper.o_true_pca_confidence_subject_to_object = o_true_pca_confidence_subject_to_object
new_rule_wrapper.o_true_pca_confidence_object_to_subject = o_true_pca_confidence_object_to_subject
return new_rule_wrapper
def set_inverse_c_weighted_std_confidence(self, label_frequency: float) -> None:
if label_frequency == 0.0:
raise Exception("Label frequency cannot be zero")
self.o_c_weighted_std_conf = 1 / label_frequency * self.o_std_confidence
def instantiate_from_amie_dict(self, o_amie_dict: Optional[Dict] = None) -> None:
if o_amie_dict is None:
amie_dict_to_use: Optional[Dict] = self.amie_dict
else:
amie_dict_to_use = o_amie_dict
if amie_dict_to_use is None:
raise Exception(f"No AMIE dict available for rule wrapper {str(self)}")
else:
if self.o_n_predictions is not None:
print("overwriting n_predictions")
self.o_n_predictions = amie_dict_to_use[AmieOutputKeyEnum.BODY_SIZE.value]
if self.o_n_supported_predictions is not None:
print("overwriting o_n_supported_predictions")
self.o_n_supported_predictions = amie_dict_to_use[AmieOutputKeyEnum.N_POSITIVE_EXAMPLES.value]
if self.o_std_confidence is not None:
print("overwriting o_std_confidence")
self.o_std_confidence = amie_dict_to_use[AmieOutputKeyEnum.STD_CONF.value]
if amie_dict_to_use[AmieOutputKeyEnum.FUNCTIONAL_VARIABLE.value] == '?a':
if self.o_pca_confidence_subject_to_object is not None:
print("overwriting o_pca_confidence_subject_to_object")
self.o_pca_confidence_subject_to_object = amie_dict_to_use[AmieOutputKeyEnum.PCA_CONF.value]
print("ONLY value for o_pca_confidence_subject_to_object")
print("NO value for o_pca_confidence_object_to_subject")
elif amie_dict_to_use[AmieOutputKeyEnum.FUNCTIONAL_VARIABLE.value] == '?b':
if self.o_pca_confidence_object_to_subject is not None:
print("overwriting o_pca_confidence_object_to_subject")
self.o_pca_confidence_object_to_subject = amie_dict_to_use[AmieOutputKeyEnum.PCA_CONF.value]
print("ONLY value for o_pca_confidence_object_to_subject")
print("NO value for o_pca_confidence_subject_to_object")
else:
raise Exception(f"Unrecognized functional AMIE variabele: "
f"{amie_dict_to_use[AmieOutputKeyEnum.FUNCTIONAL_VARIABLE.value]}")
def to_json_file(self, filename: str, gzipped: bool = True):
dict_to_convert: Dict = dict()
rule_str = str(self.rule)
dict_to_convert['rule'] = rule_str
if self.amie_dict is not None:
dict_to_convert['amie_dict'] = self.amie_dict
if self.o_n_predictions is not None:
dict_to_convert['o_n_predictions'] = self.o_n_predictions
if self.o_n_supported_predictions is not None:
dict_to_convert['o_n_supported_predictions'] = self.o_n_supported_predictions
if self.o_std_confidence is not None:
dict_to_convert['o_std_confidence'] = self.o_std_confidence
if self.o_pca_confidence_subject_to_object is not None:
dict_to_convert['o_pca_confidence_subject_to_object'] = self.o_pca_confidence_subject_to_object
if self.o_pca_confidence_object_to_subject is not None:
dict_to_convert['o_pca_confidence_object_to_subject'] = self.o_pca_confidence_object_to_subject
if self.o_c_weighted_std_conf is not None:
dict_to_convert['o_c_weighted_std_conf'] = self.o_c_weighted_std_conf
if self.o_relative_pu_confidence_unbiased is not None:
dict_to_convert['o_relative_pu_confidence_unbiased'] = self.o_relative_pu_confidence_unbiased
if self.o_relative_pu_confidence_pca_subject_to_object is not None:
dict_to_convert[
'o_relative_pu_confidence_pca_subject_to_object'
] = self.o_relative_pu_confidence_pca_subject_to_object
if self.o_relative_pu_confidence_pca_object_to_subject is not None:
dict_to_convert[
'o_relative_pu_confidence_pca_object_to_subject'
] = self.o_relative_pu_confidence_pca_object_to_subject
# if self.o_absolute_pu_confidence is not None:
# dict_to_convert['o_absolute_pu_confidence'] = self.o_absolute_pu_confidence
if self.o_true_confidence is not None:
dict_to_convert['o_true_confidence'] = self.o_true_confidence
if self.o_true_pca_confidence_subject_to_object is not None:
dict_to_convert['o_true_pca_confidence_subject_to_object'] = self.o_true_pca_confidence_subject_to_object
if self.o_true_pca_confidence_object_to_subject is not None:
dict_to_convert['o_true_pca_confidence_object_to_subject'] = self.o_true_pca_confidence_object_to_subject
pretty_frozen = json.dumps(dict_to_convert, indent=2, sort_keys=True)
if gzipped:
open_func = gzip.open
else:
open_func = open
with open_func(filename, 'wt') as output_file:
output_file.write(pretty_frozen)
@staticmethod
def read_json(filename: str, gzipped: bool = True) -> 'RuleWrapper':
if gzipped:
open_func = gzip.open
else:
open_func = open
with open_func(filename, 'rt') as input_file:
dict_to_convert: Dict = json.load(input_file)
rule_str: str = dict_to_convert["rule"]
problog_rule: ProblogClause = ProblogTerm.from_string(rule_str)
pylo_rule: Union[PyloLiteral, PyloClause] = convert_clause(
clause=problog_rule,
context=pylo_global_context
)
rule_wrapper = RuleWrapper.create_rule_wrapper(
rule=pylo_rule,
amie_dict=dict_to_convert.get('amie_dict', None),
o_n_predictions=dict_to_convert.get('o_n_predictions', None),
o_n_supported_predictions=dict_to_convert.get('o_n_supported_predictions', None),
o_std_confidence=dict_to_convert.get('o_std_confidence', None),
o_pca_confidence_subject_to_object=dict_to_convert.get(
'o_pca_confidence_subject_to_object', None),
o_pca_confidence_object_to_subject=dict_to_convert.get(
'o_pca_confidence_object_to_subject', None),
o_c_weighted_std_conf=dict_to_convert.get('o_c_weighted_std_conf', None),
o_relative_pu_confidence_unbiased=dict_to_convert.get(
'o_relative_pu_confidence_unbiased', None),
o_relative_pu_confidence_pca_subject_to_object=dict_to_convert.get(
'o_relative_pu_confidence_pca_subject_to_object', None),
o_relative_pu_confidence_pca_object_to_subject=dict_to_convert.get(
'o_relative_pu_confidence_pca_object_to_subject', None),
# o_absolute_pu_confidence = dict_to_convert.get('o_absolute_pu_confidence', None)
o_true_confidence=dict_to_convert.get('o_true_confidence', None),
o_true_pca_confidence_subject_to_object=dict_to_convert.get(
'o_true_pca_confidence_subject_to_object', None),
o_true_pca_confidence_object_to_subject=dict_to_convert.get(
'o_true_pca_confidence_object_to_subject', None),
)
return rule_wrapper
def clone_with_metrics_unset(self) -> 'RuleWrapper':
return RuleWrapper(rule=self.rule)
def clone(self) -> 'RuleWrapper':
return RuleWrapper.create_rule_wrapper(
rule=self.rule,
amie_dict=self.amie_dict,
o_n_predictions=self.o_n_predictions,
o_n_supported_predictions=self.o_n_supported_predictions,
o_std_confidence=self.o_std_confidence,
o_pca_confidence_subject_to_object=self.o_pca_confidence_subject_to_object,
o_pca_confidence_object_to_subject=self.o_pca_confidence_object_to_subject,
o_c_weighted_std_conf=self.o_c_weighted_std_conf,
o_relative_pu_confidence_unbiased=self.o_relative_pu_confidence_unbiased,
o_relative_pu_confidence_pca_subject_to_object=self.o_relative_pu_confidence_pca_subject_to_object,
o_relative_pu_confidence_pca_object_to_subject=self.o_relative_pu_confidence_pca_object_to_subject,
o_true_confidence=self.o_true_confidence,
o_true_pca_confidence_subject_to_object=self.o_true_pca_confidence_subject_to_object,
o_true_pca_confidence_object_to_subject=self.o_true_pca_confidence_object_to_subject,
)
@property
def o_rule_support(self) -> Optional[int]:
"""
The support of the rule =
the number of predictions in the observed KB.
:return:
"""
return self.o_n_supported_predictions
@property
def o_body_support(self) -> Optional[int]:
"""
The support of the rule BODY =
the number of predictions of the rule.
:return:
"""
return self.o_n_predictions
def __str__(self):
if self.amie_dict is not None:
return str(self.amie_dict)
else:
return str(self.rule)
def __repr__(self):
return self.__str__()
def get_columns_header(self) -> List[str]:
header = self.get_columns_header_without_amie()
if self.amie_dict is not None:
header = header + [key.value + " (AMIE)" for key in AmieOutputKeyEnum]
return header
@staticmethod
def get_columns_header_without_amie() -> List[str]:
return columns_header_without_amie
def to_row(self, include_amie_metrics: bool = True) -> List[Union[str, float]]:
row = [
str(self.rule),
self.o_n_supported_predictions,
self.o_n_predictions
] + [self.get_value(conf) for conf in ConfidenceEnum]
if include_amie_metrics and self.amie_dict is not None:
row.extend([self.amie_dict[key.value] for key in AmieOutputKeyEnum])
return row
def get_amie_rule_string_repr(self) -> str:
"""
:return: the string representation of the rule as generated by AMIE
"""
if self.amie_dict is None:
raise Exception()
else:
return self.amie_dict[AmieOutputKeyEnum.RULE.value]
def get_amie_head_coverage(self) -> float:
"""
        Head coverage of a rule:
            hc(r) = supp(r) / #{(x, y) : h(x, y) in the observed KB}
        i.e. the rule support (its number of observed predictions, an integer)
        divided by the number of literals with the head's relation in the
        observed KB, which turns the support into a relative measure.
        :return: the head coverage according to AMIE
"""
if self.amie_dict is None:
raise Exception()
else:
return self.amie_dict[AmieOutputKeyEnum.HEAD_COVERAGE.value]
    def get_amie_rule_support(self) -> int:
        """
        Rule support = the number of the rule's predictions that are supported by
        (i.e. occur in) the observed KB; AMIE reports this as 'Positive Examples'.
        :return: the rule support according to AMIE
        """
        if self.amie_dict is None:
            raise Exception()
        return self.amie_dict[AmieOutputKeyEnum.N_POSITIVE_EXAMPLES.value]
@staticmethod
def create_rule_wrapper_from(amie_output_rule_series: pd.Series) -> 'RuleWrapper':
amie_rule_dict: Dict = amie_output_rule_series.to_dict()
rule_string: str = amie_rule_dict[AmieOutputKeyEnum.RULE.value]
problog_rule: ProblogClause = DatalogAMIERuleConvertor.convert_amie_datalog_rule_string_to_problog_clause(
rule_string)
pylo_rule: Union[PyloLiteral, PyloClause] = convert_clause(
clause=problog_rule,
context=pylo_global_context
)
return RuleWrapper(rule=pylo_rule, o_amie_dict=amie_rule_dict)
def set_std_confidence(self, calculated_value: float) -> None:
if self.amie_dict is not None:
amie_std_conf_value: float = self.amie_dict[AmieOutputKeyEnum.STD_CONF.value]
if abs(amie_std_conf_value - calculated_value) >= 0.01:
print(f"Calculated STD conf differs from AMIE: {calculated_value:0.3f} vs {amie_std_conf_value:0.3f}")
self.o_std_confidence = calculated_value
def set_pca_confidence(self, calculated_value: float) -> None:
if self.amie_dict is not None:
amie_pca_conf_value: float = self.amie_dict[AmieOutputKeyEnum.PCA_CONF.value]
if abs(amie_pca_conf_value - calculated_value) >= 0.01:
print(f"Calculated PCA conf differs from AMIE: {calculated_value:0.3f} vs {amie_pca_conf_value:0.3f}")
self.o_pca_confidence_subject_to_object = calculated_value
def get_pylo_rule_from_string(rule_str: str) -> PyloClause:
problog_rule: ProblogClause = ProblogTerm.from_string(rule_str)
pylo_rule: Union[PyloLiteral, PyloClause] = convert_clause(
clause=problog_rule,
context=pylo_global_context
)
return pylo_rule
def is_pylo_rule_recursive(pylo_rule: PyloClause) -> bool:
head_functor: str = pylo_rule.get_head().get_predicate().get_name()
body_functors: Set[str] = {
body_literal.get_predicate().get_name()
for body_literal in pylo_rule.get_body().get_literals()
}
return head_functor in body_functors
def filter_rules_predicting(
rule_wrapper_sequence: Iterator[RuleWrapper],
head_functor_set: Optional[Set[str]] = None
) -> List[RuleWrapper]:
# filtered_rules: List[RuleWrapper] = []
# rule_wrapper: RuleWrapper
# for rule_wrapper in rule_wrapper_sequence:
# if rule_wrapper.rule.get_head().predicate.name in head_functor_set:
# filtered_rules.append(rule_wrapper)
if head_functor_set is None:
return [rule_wrapper for rule_wrapper in rule_wrapper_sequence]
else:
return [rule_wrapper for rule_wrapper in rule_wrapper_sequence
if rule_wrapper.rule.get_head().predicate.name in head_functor_set]
def contains_rule_predicting_relation(
rule_wrapper_sequence: Iterator[RuleWrapper],
target_relation: str
) -> bool:
return len(
filter_rules_predicting(
rule_wrapper_sequence=rule_wrapper_sequence,
head_functor_set={target_relation}
)
) > 0
def create_amie_dataframe_from_rule_wrappers(rule_wrapper_collection: List[RuleWrapper]) -> pd.DataFrame:
columns = [AmieOutputKeyEnum.RULE.value] + [key for key in rule_wrapper_collection[0].amie_dict.keys()
if key != AmieOutputKeyEnum.RULE.value]
data: List[List] = []
for rule in rule_wrapper_collection:
row = [str(rule.rule)] + [rule.amie_dict[key] for key in columns if key != AmieOutputKeyEnum.RULE.value]
data.append(row)
df = pd.DataFrame(data=data, columns=columns)
return df
def create_extended_dataframe_from_rule_wrappers(rule_wrapper_collection: List[RuleWrapper]) -> pd.DataFrame:
columns_header = rule_wrapper_collection[0].get_columns_header()
row_data = []
rule_wrapper: RuleWrapper
for rule_wrapper in rule_wrapper_collection:
row = rule_wrapper.to_row()
row_data.append(row)
df = pd.DataFrame(data=row_data, columns=columns_header)
return df
def create_dataframe_without_amie_from_rule_wrappers(rule_wrapper_collection: List[RuleWrapper]) -> pd.DataFrame:
columns_header = RuleWrapper.get_columns_header_without_amie()
row_data = []
rule_wrapper: RuleWrapper
for rule_wrapper in rule_wrapper_collection:
row = rule_wrapper.to_row(include_amie_metrics=False)
row_data.append(row)
    df = pd.DataFrame(data=row_data, columns=columns_header)
    return df
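# --- Illustrative sketch (not part of the original file above) ---------------
# The wrapper above stores several rule-confidence values. As a reminder of what
# the two basic ones mean, here they are computed by hand for a toy rule
# h(X, Y) :- b(X, Y) over a handful of made-up pairs; the helper below is not
# part of kbc_pul's API.
def _toy_confidences(body_pairs, observed_head_pairs):
    predictions = set(body_pairs)                        # everything the rule predicts
    supported = predictions & set(observed_head_pairs)   # predictions found in the observed KB
    std_conf = len(supported) / len(predictions)         # CWA / standard confidence
    # PCA confidence (subject-to-object): only count predictions whose subject
    # already has at least one known object for the head relation.
    known_subjects = {s for s, _ in observed_head_pairs}
    pca_body = {p for p in predictions if p[0] in known_subjects}
    pca_conf = len(supported) / len(pca_body) if pca_body else float("nan")
    return std_conf, pca_conf

print(_toy_confidences(
    body_pairs=[("a", "x"), ("a", "y"), ("b", "z")],
    observed_head_pairs=[("a", "x"), ("c", "w")],
))  # -> (0.333..., 0.5)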
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Combine raw tweets data, per hour, into single CSV file."""
# pylint: disable=invalid-name,too-many-locals,too-many-arguments
import os
from datetime import datetime
from io import StringIO
from typing import Dict, List, Union
import boto3
import pandas as pd
def get_objects_in_one_s3_level(
s3b_name: str, content: Union[Dict, str], region: str
) -> Dict:
"""Get list of all storage objects in single S3 level."""
s3_client = boto3.client("s3", region_name=region)
# Get path to hourly sub-folders within each daily folder on S3 bucket
prefix = content if isinstance(content, str) else content.get("Prefix")
# Get list of all objects in all hourly sub-folders
# - each list of is a list of dictionaries, where each dict contains keys:
# - Key, LastModified, ETag, Size, StorageClass
response_new = s3_client.list_objects_v2(
Bucket=s3b_name, Prefix=prefix, Delimiter="/"
)
return response_new
def get_data_metadata(file: str, s3_bucket_name: str, region: str) -> Dict:
"""Extract data and file metadata from raw tweets data."""
s3_client = boto3.client("s3", region_name=region)
# Get File body (decoded contents) from file dictionary
file_body = s3_client.get_object(
Bucket=s3_bucket_name, Key=file.get("Key")
)["Body"].read()
# Get File name from file dictionary
file_name = os.path.basename(file["Key"])
return {"file_body": file_body, "file_name": file_name}
def get_attrs_extracted_from_tweet_text(
row: pd.Series, attr_type: str = "hashtags"
) -> str:
"""Get attrs (hashtags or usernames) extracted from tweet text."""
# Get extracted attribute (tweet_text_hashtags or tweet_text_usernames)
# from each tweet (row of a pandas DataFrame)
# - attributes will be the '|' separated string
extracted = str(row[f"tweet_text_{attr_type}"])
# Split the string by the pipe operator ('|') to give a single string of
# space-separated attributes
extracted_separated = (
" " + extracted.replace("|", " ") if str(extracted) != "nan" else ""
)
# print(
# row.name,
# type(extracted_separated),
# extracted_separated,
# f"extracted_{attr_type}={extracted_separated}",
# )
return extracted_separated
def get_datetime_string() -> str:
"""Generate current timestamp as string."""
return datetime.now().strftime("%Y%m%d%H%M%S")
def get_hourly_data_metadata(
data_list: List,
headers: List,
fpath: str,
cols_to_use: List[str],
unwanted_partial_strings_list: List[str],
combine_hashtags_usernames: bool = False,
get_metadata_agg: bool = False,
) -> List[pd.DataFrame]:
"""Load raw tweets data and file metadata into DataFrames."""
year, month, day, hour = fpath.split("/", 3)[-1].split("/", 3)
dfs = []
dfs_metadata = []
# Loop over list of dictionaries, where each dict corresponds to a
# separate file and contains keys: file_name, file_body (file contents)
for k, raw_data_contents in enumerate(data_list):
# Decode file contents and split by \n giving nested list
# - each sub-list is a single tweet and its metadata
single_buffer_data_strings = (
raw_data_contents["file_body"].decode("utf-8").split("\n")
)
# Iterate over nested list
all_buffer_contents = []
for q, data_string in enumerate(single_buffer_data_strings):
if data_string:
# split each sub-list by \t in order to get values for each
# field
values = data_string.strip().split("\t")
# print(
# k+1,
# q+1,
# len(raw_data_contents["file_body"]),
# len(values),
# len(values) != len(headers),
# data_string,
# )
# Append tweet metadata to dict
dfs_metadata.append(
{
"file": k + 1,
"file_name": raw_data_contents["file_name"],
"encoded_length": len(raw_data_contents["file_body"]),
"values_index": q + 1,
"len_values": len(values),
"malformed_values": len(values) != len(headers),
"file_year": year,
"file_month": month,
"file_day": day,
"file_hour": hour[:-1],
}
)
# Append tweet data to dict (if data is not malformed with
# more fields than expected)
if len(values) == len(headers):
all_buffer_contents.append(values)
# Convert nested list of tweet data into DataFrame and append raw data
# filename as separate column
        df_row = pd.DataFrame(all_buffer_contents, columns=headers)
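# --- Illustrative sketch (not part of the original file above) ---------------
# get_attrs_extracted_from_tweet_text() above turns the '|'-separated hashtag /
# username strings into one space-prefixed string (or '' for missing values).
# The toy row below is made up to show the two cases.
row_demo = pd.Series({"tweet_text_hashtags": "python|pandas", "tweet_text_usernames": float("nan")})
print(repr(get_attrs_extracted_from_tweet_text(row_demo, "hashtags")))   # ' python pandas'
print(repr(get_attrs_extracted_from_tweet_text(row_demo, "usernames")))  # ''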
# Run this script as a "standalone" script (terminology from the Django
# documentation) that uses the Djano ORM to get data from the database.
# This requires django.setup(), which requires the settings for this project.
# Appending the root directory to the system path also prevents errors when
# importing the models from the app.
if __name__ == '__main__':
import sys
import os
import django
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir))
sys.path.append(parent_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "metadataset.settings")
django.setup()
import pandas as pd
from publications.models import Intervention
# Load a csv file with the list to be added to the database.
csv = "./publications/data/invasive_species/interventions_2.0.csv"
df = pd.read_csv(csv, encoding="utf-8")
root_node = "invasive species"
root_node = Intervention.objects.get(intervention=root_node)
for row in df.itertuples():
code = str(row.Code1) + '.'
level1 = row.Level1
level1, created = Intervention.objects.get_or_create(intervention=level1, code=code, parent=root_node)
code = code + str(row.Code2) + '.'
level2 = row.Level2
if not pd.isnull(level2):
level2, created = Intervention.objects.get_or_create(intervention=level2, code=code, parent=level1)
code = code + str(row.Code3).replace('.0', '') + '.'
level3 = row.Level3
if not pd.isnull(level3):
level3, created = Intervention.objects.get_or_create(intervention=level3, code=code, parent=level2)
code = code + str(row.Code4) + '.'
level4 = row.Level4
if not pd.isnull(level4):
level4, created = Intervention.objects.get_or_create(intervention=level4, code=code, parent=level3)
code = code + str(row.Code5).replace('.0', '') + '.'
level5 = row.Level5
    if not pd.isnull(level5):
        level5, created = Intervention.objects.get_or_create(intervention=level5, code=code, parent=level4)
#from rest_client import get_currency_data
import pandas as pd
from functools import reduce
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn import metrics
import matplotlib.pyplot as plt
def create_label(row, row_label):
print(row)
if row[row_label] > 0.09:
return 'rast'
if row[row_label] < -0.09:
return 'padec'
return 'enako'
def predict_regression(input, output, naziv_boxplot):
x_train, x_test, y_train, y_test = train_test_split(input, output, test_size=0.33, random_state=0)
lr = LinearRegression()
lr.fit(x_train, y_train)
lr_result = lr.predict(x_test)
dtr = DecisionTreeRegressor()
dtr.fit(x_train, y_train)
dtr_result = dtr.predict(x_test)
lin_vec = SVR()
lin_vec.fit(x_train, y_train)
lin_vec_result = lin_vec.predict(x_test)
#df_a = pd.DataFrame({'y_test': y_test, 'lr_result': lr_result,
# 'dtr_result': dtr_result, 'lin_vec_result': lin_vec_result})
list_val = ('mean absolute error', 'metoda regresije')
df_boxplot_mae = pd.DataFrame([[metrics.mean_absolute_error(lr_result, y_test), 'linearna regresija'],
[metrics.mean_absolute_error(dtr_result, y_test), 'regresijsko drevo'],
[metrics.mean_absolute_error(lin_vec_result, y_test), 'regresija SVM']], columns=list_val)
sns.barplot(x="metoda regresije", y="mean absolute error", data=df_boxplot_mae, palette="Reds").set_title(naziv_boxplot)
plt.xticks(rotation=0)
plt.show()
list_val = ('mean squared error', 'metoda regresije')
df_boxplot_mse = pd.DataFrame([[metrics.mean_squared_error(lr_result, y_test), 'linearna regresija'],
[metrics.mean_squared_error(dtr_result, y_test), 'regresijsko drevo'],
[metrics.mean_squared_error(lin_vec_result, y_test), 'regresija SVM']],
columns=list_val)
sns.barplot(x="metoda regresije", y="mean squared error", data=df_boxplot_mse, palette="Reds").set_title(naziv_boxplot)
plt.xticks(rotation=0)
plt.show()
list_val = ('r^2', 'metoda regresije')
df_boxplot_r2 = pd.DataFrame([[metrics.r2_score(lr_result, y_test), 'linearna regresija'],
[metrics.r2_score(dtr_result, y_test), 'regresijsko drevo'],
[metrics.r2_score(lin_vec_result, y_test), 'regresija SVM']],
columns=list_val)
sns.barplot(x="metoda regresije", y="r^2", data=df_boxplot_r2, palette="Reds").set_title(naziv_boxplot)
plt.xticks(rotation=0)
plt.show()
def predict_classification(df, input, output):
classifier_list = [
KNeighborsClassifier(),
LinearSVC(),
GaussianNB(),
DecisionTreeClassifier(),
RandomForestClassifier(),
ExtraTreeClassifier(),
AdaBoostClassifier(),
GradientBoostingClassifier(),
LogisticRegression()
]
results_array = []
print(len(df))
for classifier in classifier_list:
kfold = KFold(n_splits=len(df), random_state=0)
cv_results = cross_val_score(classifier, df[input], df[output],
cv=kfold, scoring="accuracy")
print(cv_results)
results_array.append(cv_results.mean())
print(cv_results.mean())
list_val = ('natančnost', 'klasifikator')
df_classification = pd.DataFrame([[results_array[0], classifier_list[0]],
[results_array[1], classifier_list[1]],
[results_array[2], classifier_list[2]],
[results_array[3], classifier_list[3]],
[results_array[4], classifier_list[4]],
[results_array[5], classifier_list[5]],
[results_array[6], classifier_list[6]]], columns=list_val)
sns.barplot(x="klasifikator", y="natančnost", data=df_classification, palette="Reds")
plt.xticks(rotation=-60)
plt.show()
#def predict_classification(df):
#BTC, EOS, ADA, LSK, ZRX, SKY
#df_bitcoin, df_eos, df_ada, df_lisk, df_zrx, df_sky
#df_btc = get_currency_data('bitcoin', 'bitcoin')
#df_btc.to_csv('files/data_bitcoin.csv')
#df_eos = get_currency_data('eosio', 'eos')
#df_eos.to_csv('files/data_eos.csv')
#df_ada = get_currency_data('input-output-hk', 'cardano-sl')
#df_ada.to_csv('files/data_ada.csv')
#df_lisk = get_currency_data('liskHQ', 'lisk')
#df_lisk.to_csv('files/data_lisk.csv')
#df_zrx = get_currency_data('0xProject', '0x.js')
#df_zrx.to_csv('files/data_zrx.csv')
#df_sky = get_currency_data('skycoin', 'skycoin')
#df_sky.to_csv('files/data_skycoin.csv')
df_eos = pd.read_csv('files/data_eos.csv', sep=',', decimal='.', index_col=0)
df_bitcoin = pd.read_csv('files/data_bitcoin.csv', sep=',', decimal='.', index_col=0)
df_ada = pd.read_csv('files/data_ada.csv', sep=',', decimal='.', index_col=0)
df_lisk = pd.read_csv('files/data_lisk.csv', sep=',', decimal='.', index_col=0)
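# --- Illustrative sketch (not part of the original file above) ---------------
# create_label() above maps a numeric change to 'rast'/'padec'/'enako' row by
# row; a typical row-wise application looks like this. The column name
# 'price_change' is only an example.
df_label_demo = pd.DataFrame({'price_change': [0.15, -0.20, 0.01]})
df_label_demo['label'] = df_label_demo.apply(lambda r: create_label(r, 'price_change'), axis=1)
print(df_label_demo)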
import os
import numpy as np
import pandas as pd
from pandas.core.common import array_equivalent
from plio.utils.utils import file_search
# This function reads the lookup tables used to expand metadata from the file names
# This is separated from parsing the filenames so that for large lists of files the
# lookup tables don't need to be read over and over
#
# Info in the tables is stored in a dict of dataframes so that only one variable
# (the dict) needs to be passed between functions
def read_refdata(LUT_files):
ID_info = pd.read_csv(LUT_files['ID'], index_col=0)
    spectrometer_info = pd.read_csv(LUT_files['spect'], index_col=0)
from avatar_models.utils.util import get_config
import pandas as pd
import os
from avatar_models.captioning.evaluate import CaptionWithAttention
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.spice.spice import Spice
from tqdm import tqdm
import json
import collections
import random
import pickle
from tensorflow.keras.preprocessing.text import text_to_word_sequence
from avatar_models.captioning.catr.predict import CATRInference
def get_ade20k_caption_annotations():
"""
    Precondition: check out https://github.com/clp-research/image-description-sequences under the location
    of the ade20k_dir directory.
:return:
"""
conf = get_config()
ade20k_dir = conf["ade20k_dir"]
ade20k_caption_dir = conf["ade20k_caption_dir"]
captions_file = os.path.join(ade20k_caption_dir, "captions.csv")
sequences_file = os.path.join(ade20k_caption_dir, "sequences.csv")
    captions_df = pd.read_csv(captions_file, sep="\t", header=0)
import pandas as pd
from numpy import isnan
'''
@Author <NAME>
Reads in the final derived file in order to find
bluebook treatments, and then compares with statement outcome data
in order to determine which alternative was chosen at each meeting
'''
def main():
derived_df = pd.read_csv("../../../derivation/python/output/meeting_derived_file.csv")
alternative_df = extract_alternative_df(derived_df)
ffr_df = get_ffr(1988,2008)
merged_df = merge_ffr_alt_and_dec(ffr_df,alternative_df)
#print(merged_df)
merged_df.to_csv("../output/fed_targets_with_alternatives.csv")
# Process missing alternatives
bb_missing_df=pd.read_excel("../data/bluebook_missingalternatives.xlsx")
bb_missing_df['start_date'] = pd.to_datetime(bb_missing_df['date'])
bb_pivot = bb_missing_df.pivot(index = 'start_date', columns ='alt' , values =['text', 'change'])
bb_pivot = bb_pivot.reset_index()
new_cols = ['%s_%s' % (a, b if b else '') for a, b in bb_pivot.columns]
bb_pivot.columns = ['start_date'] + new_cols[1:]
bb_pivot = bb_pivot.merge(merged_df,on='start_date',how='inner')
bb_pivot=bb_pivot[['start_date','date', 'ffrtarget',
'target_before', 'target_after', 'decision',
'text_a', 'text_b', 'text_c',
'text_d', 'change_a','change_b', 'change_c', 'change_d']]
ren_dict = dict(zip(['change_a','change_b', 'change_c', 'change_d'],['bluebook_treatment_size_alt_a',
'bluebook_treatment_size_alt_b',
'bluebook_treatment_size_alt_c',
'bluebook_treatment_size_alt_d']))
text_dict = dict(zip([f"text_{alt}" for alt in ['a','b','c','d']],[f"alt {alt} corpus" for alt in ['a','b','c','d']]))
bb_pivot.rename(columns=ren_dict ,inplace=True)
bb_pivot.rename(columns=text_dict ,inplace=True)
for alt in ['a','b','c','d']:
bb_pivot[f"alt_{alt}_rate"] = bb_pivot['target_before'] - bb_pivot[f'bluebook_treatment_size_alt_{alt}']
bb_pivot[f'alt {alt} corpus'] = bb_pivot[f'alt {alt} corpus'].apply(lambda text : f"[{text}]")
bb_pivot.to_csv("../output/fed_targets_with_alternatives_missinbb.csv")
def get_ffr(startyear,endyear):
ffr=pd.read_excel("../../../collection/python/data/FRED_DFEDTAR.xls",skiprows=10)
ffr.rename(columns={"observation_date":"date","DFEDTAR":"ffrtarget"},inplace=True)
ffr['year']=ffr['date'].apply(lambda x: x.year)
ffr=ffr[(ffr['year']>=startyear) & (ffr['year']<=endyear)]
ffr['target_before'] = ffr['ffrtarget'].shift(1)
ffr['target_after'] = ffr['ffrtarget'].shift(-1)
#print(ffr)
return ffr
def merge_ffr_alt_and_dec(ffr,alternatives):
alternatives['alt_a'] = alternatives['bluebook_treatment_size_alt_a'].\
apply(lambda x: pd.to_numeric(x,errors="coerce"))
alternatives['alt_b'] = alternatives['bluebook_treatment_size_alt_b'].\
apply(lambda x: pd.to_numeric(x,errors="coerce"))
alternatives['alt_c'] = alternatives['bluebook_treatment_size_alt_c'].\
apply(lambda x: pd.to_numeric(x,errors="coerce"))
alternatives['alt_d'] = alternatives['bluebook_treatment_size_alt_d']. \
        apply(lambda x: pd.to_numeric(x, errors="coerce"))
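# --- Illustrative sketch (not part of the original file above) ---------------
# main() above pivots the long missing-alternatives table to one row per meeting
# with columns like text_a / change_a and then flattens the resulting MultiIndex
# columns. The tiny frame below reproduces just that step on made-up data.
long_demo = pd.DataFrame({
    'start_date': ['1994-02-04', '1994-02-04'],
    'alt': ['a', 'b'],
    'text': ['keep the funds rate unchanged', 'raise the funds rate 25bp'],
    'change': [0.0, 0.25],
})
wide_demo = long_demo.pivot(index='start_date', columns='alt', values=['text', 'change'])
wide_demo.columns = [f"{value}_{alt}" for value, alt in wide_demo.columns]   # flatten MultiIndex
wide_demo = wide_demo.reset_index()
print(wide_demo.columns.tolist())   # ['start_date', 'text_a', 'text_b', 'change_a', 'change_b']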
from itertools import product
import networkx as nx
import numpy as np
import pandas as pd
from .probability import (
Variable, ProbabilityTree, JointDist, TreeDistribution)
class Equation(object):
"""Maps input variable(s) to output variable(s)"""
INPUT_LABEL = 'Input'
OUTPUT_LABEL = 'Output'
def __init__(self, name, inputs, outputs, strategy_func):
"""Use the strategy_func to map inputs to outputs.
Args:
name (str): Identifying name of equation.
inputs (List[Variable]): Variables to map from.
outputs (List[Variable]): Variables to map to.
strategy_func (function): Mapping function.
"""
assert str(name) == name
assert not [i for i in inputs if not isinstance(i, Variable)]
assert not [o for o in outputs if not isinstance(o, Variable)]
self.name = name
self.inputs = inputs
self.outputs = outputs
# Create an array with all possible combinations of states of inputs
input_states = list(product(*[i.states for i in inputs]))
self.input_states = input_states
# We need arrays to hold the results of each output (note: they could
# be different sizes)
self.per_state_results = [
np.zeros((len(input_states), o.n_states),
dtype=float) for o in outputs]
# Create a lookup table based on the strategy function. Then we can
# discard the function (very useful if we're interested in pickling).
self.lookup = {}
for i, states in enumerate(input_states):
# Get out relevant states to fill out
results = [c[i] for c in self.per_state_results]
# Send arguments as (input, input, ..., output, output, ...)
args = [s for s in states]
args.extend(results)
strategy_func(*args)
# Each of the output distributions must sum to 1.0
for r in results:
if not np.isclose(r.sum(), 1.0):
raise RuntimeError(
"Probabilities must add to 1.0: {}".format(r))
# Keep this around
self.lookup[states] = results
def calculate(self, assignments):
"""Calculate output given variable / state assignments"""
# Build a tuple of the relevant input states from the set of
# assignments given.
states = tuple([assignments[v] for v in self.inputs])
# Look them up
try:
results = self.lookup[states]
except KeyError:
raise RuntimeError("Error in {} with key {}".format(self, states))
        # Now, construct a mapping over the output variables and return that.
return dict(zip(self.outputs, results))
def __repr__(self):
return "<{}>".format(self.name)
def to_frame(self):
"""Output the mapping equation in a nice way
        We do this in a long-winded way, but it allows pandas to do the nice
        formatting for us. We generate a row for every possible combination of
        input and output states of this variable, then use pivot_table to
        construct a table for us with nice row/column headings.
"""
# Create a set of dictionaries/lists for each column
data = dict([(i_var.name, []) for i_var in self.inputs])
data.update({self.OUTPUT_LABEL: [], self.INPUT_LABEL: [], self.name: []})
# A very ugly loop to produce all the probabilities in a nice way.
# Note that this just reproduces what is already in `self.lookup`.
# Honestly, I just haven't thought of a better way to get nice output.
for i_index, i_state in enumerate(self.input_states):
for o_var, results in zip(self.outputs, self.per_state_results):
for o_state, o_p in enumerate(results[i_index]):
for i_var, s in zip(self.inputs, i_state):
data[i_var.name].append(s)
data[self.OUTPUT_LABEL].append(o_var.name)
data[self.INPUT_LABEL].append(o_state)
data[self.name].append(o_p)
        all_data = pd.DataFrame(data=data)
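# --- Illustrative sketch (not part of the original class above) --------------
# Equation calls strategy_func(*input_states, *output_rows) and expects each
# output row (a 1-D numpy slice of length n_states) to be filled with a
# distribution summing to 1. A minimal strategy for one binary input and one
# binary output, copying the input with 10% noise (states assumed to be the
# integers 0 and 1; the noise level is made up):
def noisy_copy(in_state, out_dist):
    out_dist[:] = 0.0
    out_dist[in_state] = 0.9        # keep the input state with probability 0.9
    out_dist[1 - in_state] = 0.1    # flip it with probability 0.1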
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import re
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn import multioutput
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
#ML models
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC,SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
import pickle
import warnings
warnings.filterwarnings("ignore")
def evaulation_metric(y_true,y_pred):
'''
Input
y_true: ground truth dataframe
y_pred: predicted dataframe
Output
report: dataframe that contains mean f1-score,precision and recall value for each class
'''
report = pd.DataFrame ()
for col in y_true.columns:
class_dict = classification_report (output_dict = True, y_true = y_true.loc [:,col], y_pred = y_pred.loc [:,col])
metric_df = pd.DataFrame (pd.DataFrame.from_dict (class_dict))
metric_df.drop(['macro avg', 'weighted avg'], axis =1, inplace = True)
metric_df.drop(index = 'support', inplace = True)
metric_df = pd.DataFrame (metric_df.transpose ().mean ())
metric_df = metric_df.transpose ()
report = report.append (metric_df, ignore_index = True)
report.index = y_true.columns
return report
def load_data(database_filepath):
'''
Input
database_filepath: filepath of database
Output: X,y and category names from the database
'''
engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('message_and_category', engine)
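# --- Illustrative sketch (not part of the original file above) ---------------
# evaulation_metric() above expects two DataFrames with identical multi-label
# columns and returns one row of mean precision/recall/f1 per column. A toy
# call with made-up labels:
y_true_demo = pd.DataFrame({'water': [1, 0, 1, 1], 'food': [0, 0, 1, 0]})
y_pred_demo = pd.DataFrame({'water': [1, 0, 0, 1], 'food': [0, 1, 1, 0]})
print(evaulation_metric(y_true_demo, y_pred_demo))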
# -*- coding: utf-8 -*-
"""
@author: Adam
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
from .trajectory import trajectory, final_position
def fly(fa, vol_t, initial, charge, mass, dt, **kwargs):
""" Calculate the trajectories of charged particles in a
time-varying electric field
Parameters
----------
fa :: FastAdjust
fast-adjust potential arrays
vol_t :: func(t),
returns np.array([v0, v1, ... vn]) (V)
initial :: pd.DataFrame
initial positions and velocities
charge :: float64
particle charge (C)
mass :: float64
particle mass (kg)
dt :: float64
time step (s)
max_iterations :: int
(default: 1 million)
mode :: str
'full' (default) or 'final'
tqdm_kw :: dict
keyword arguments for tqdm (progress bar)
Returns
-------
mode='full': step-by-step trajectories for all particles ::
pd.DataFrame(index=['particle', 'time'], columns=['x', 'y', 'z', 'KE', 'PE'])
mode='final': final position for all particles ::
        pd.DataFrame(index=['particle'], columns=['t', 'x', 'y', 'z', 'vx', 'vy', 'vz'])
"""
max_iterations = kwargs.get("max_iterations", int(1e6))
mode = kwargs.get("mode", "full")
tqdm_kw = kwargs.get("tqdm_kw", {})
# fly ions
num = len(initial.index)
result = {}
for i, row in tqdm(initial.iterrows(), total=len(initial.index), **tqdm_kw):
t0 = row.time
x0 = np.array([row.x, row.y, row.z])
v0 = np.array([row.vx, row.vy, row.vz])
if fa.electrode_r(x0):
pass
elif mode == 'full':
result[i] = trajectory(fa, vol_t, t0, x0, v0,
charge, mass, dt, max_iterations)
elif mode == 'final':
result[i] = final_position(fa, vol_t, t0, x0, v0,
charge, mass, dt, max_iterations, to_series=False)
else:
raise ValueError("valid values for arg `mode` : 'full', 'final' ")
# output
if mode == "full":
        return pd.concat(result, names=["particle"])
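# --- Illustrative sketch (not part of the original file above) ---------------
# The `initial` argument of fly() is one row per particle with the columns read
# inside the loop above (time, x, y, z, vx, vy, vz). The values below are
# arbitrary; `fa` and `vol_t` still have to come from the rest of the package.
initial_demo = pd.DataFrame({
    "time": [0.0, 0.0],
    "x": [0.0, 1e-3], "y": [0.0, 0.0], "z": [1e-3, 1e-3],
    "vx": [0.0, 0.0], "vy": [0.0, 0.0], "vz": [10.0, 10.0],
})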
import argparse
import csv
import json
import joblib
import os
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from time import time
import sklearn
# https://www.kaggle.com/mantri7/imdb-movie-reviews-dataset?select=train_data+%281%29.csv
############################## Global variables ##############################
column_names = ['0', '1']
############################## Helper Functions ##############################
def read_data_arg(argument, channel):
# Take the set of files and read them all into a single pandas dataframe
input_files = [ os.path.join(argument, file) for file in os.listdir(argument) ]
if len(input_files) == 0:
raise ValueError(('There are no files in {}.\n' +
'This usually indicates that the channel ({}) was incorrectly specified,\n' +
'the data specification in S3 was incorrectly specified or the role specified\n' +
'does not have permission to access the data.').format(argument, channel))
raw_data = [ pd.read_csv(file,
header=None,
names=column_names ) for file in input_files ]
data = pd.concat(raw_data)
return data
############################## Required Functions ##############################
def model_fn(model_dir):
"""Deserialize fitted model"""
print('storing the model....')
clf = joblib.load(os.path.join(model_dir, "model.joblib"))
return clf
def input_fn(request_body,content_type):
print('in input_fun')
if content_type == 'application/json':
print("content_type is application/json....formatting the request body to a dataframe")
data = json.loads(request_body)
        data = pd.Series(data)
import pandas as pd
import geopandas as gpd
import numpy as np
import sqlite3
from sklearn.cluster import DBSCAN
import os
import osmnx as ox
import math
def osm_downloader(boundary=None, osm_path=None, regenerating_shp=False):
"""
Download drive network within a certain geographical boundary.
:param boundary:
geodataframe of the area for downloading the network.
:param osm_path:
file path to save the network in .shp and .graphml.
:param regenerating_shp:
        whether to regenerate the shape file.
:return:
None
"""
minx, miny, maxx, maxy = boundary.geometry.total_bounds
new_network = osm_path + 'drive.graphml'
new_network_shp = osm_path + 'drive.shp'
def shp_processor(G, new_network_shp):
print('Processing graphml to GeoDataframe...')
gdf_n = ox.graph_to_gdfs(G)
edge = gdf_n[1]
edge = edge.loc[:, ['geometry', 'highway', 'junction', 'length', 'maxspeed', 'name', 'oneway',
'osmid', 'u', 'v', 'width', 'lanes']]
fields = ['osmid', 'u', 'v', 'length', 'maxspeed', 'oneway']
df_inter = pd.DataFrame()
for f in fields:
df_inter[f] = edge[f].astype(str)
gdf_edge = gpd.GeoDataFrame(df_inter, geometry=edge["geometry"])
gdf_edge = gdf_edge.rename(columns={'osmid': 'osm_id', 'maxspeed': 'max_speed'})
print('Saving as shp...')
gdf_edge.to_file(new_network_shp)
if not os.path.exists(new_network):
print('Downloading graphml...')
G = ox.graph_from_bbox(maxy, miny, maxx, minx, network_type='drive')
print('Saving as graphml...')
ox.save_graphml(G, filepath=new_network)
if not os.path.exists(new_network_shp):
shp_processor(G, new_network_shp)
else:
if regenerating_shp:
print('Loading graphml...')
G = ox.load_graphml(new_network)
shp_processor(G, new_network_shp)
if not os.path.exists(new_network_shp):
print('Loading graphml...')
G = ox.load_graphml(new_network)
shp_processor(G, new_network_shp)
else:
print('Drive networks exist. Skip downloading.')
def filter_trips(df=None, boundary=None):
"""
Filter trips within a certain geographical boundary.
:param boundary:
geodataframe of the area for downloading the network.
:param df, dataframe:
[userid, timeslot, day_n, distance, latitude, longitude, latitude_d, longitude_d].
:return:
Filtered trips, dataframe
[userid, timeslot, day_n, distance, latitude, longitude, latitude_d, longitude_d].
"""
# Origin
print('Filtering origins...')
gdf = gpd.GeoDataFrame(
df,
crs="EPSG:4326",
geometry=gpd.points_from_xy(df.longitude, df.latitude)
)
gdf = gpd.clip(gdf, boundary.convex_hull)
gdf.drop(columns=['geometry'], inplace=True)
# Destination
print('Filtering destinations...')
gdf = gpd.GeoDataFrame(
gdf,
crs="EPSG:4326",
geometry=gpd.points_from_xy(gdf.longitude_d, gdf.latitude_d)
)
gdf = gpd.clip(gdf, boundary.convex_hull)
gdf.drop(columns=['geometry'], inplace=True)
return gdf
def cluster(ts, eps_km=0.1, min_samples=1):
"""
Clusters each users tweets with DBSCAN.
:param ts:
[userid*, latitude, longitude, ...rest]
:param eps_km:
eps parameter of DBSCAN expressed in kilometers.
:param min_samples:
min_samples parameter of DBSCAN.
:return:
[userid*, latitude, longitude, region, ...rest]
"""
def f(_ts):
kms_per_radian = 6371.0088
coords_rad = np.radians(_ts[['latitude', 'longitude']].values)
cls = DBSCAN(eps=eps_km / kms_per_radian, min_samples=min_samples, metric='haversine').fit(coords_rad)
return _ts.assign(region=pd.Series(cls.labels_, index=_ts.index).values)
regions = ts.groupby('userid', as_index=False).apply(f)
return regions
def during_home(ts):
"""
Only returns tweets that are during "home-hours".
"""
weekdays = (ts['weekday'] < 6) & (0 < ts['weekday'])
weekends = (ts['weekday'] == 6) | (0 == ts['weekday'])
morning_evening = (ts['hourofday'] < 9) | (17 < ts['hourofday'])
return ts[((weekdays) & (morning_evening)) | (weekends)]
def label_home(ts):
"""
Labels the most visited region during "home-hours" as home.
input: (* = index)
[userid*, region, ...rest]
output:
[userid*, region, label, ...rest]
"""
_ts = ts.copy(deep=True)
_ts = _ts.reset_index().set_index(['userid', 'region']).sort_index()
_ts = _ts.assign(
label=pd.Series(dtype=str, index=_ts.index).fillna('other'),
)
homeidxs = during_home(_ts) \
.groupby(['userid', 'region']).size() \
.groupby('userid').nlargest(1) \
.droplevel(0).index
_ts.loc[homeidxs, 'label'] = 'home'
return _ts.reset_index().set_index('userid')
def gaps(df):
dtypes = df.dtypes.to_dict()
df_or = df.shift(1).dropna().astype(dtypes).reset_index(drop=True)
df_ds = df.shift(-1).dropna().astype(dtypes).reset_index(drop=True)
df = df_or.join(df_ds, lsuffix="_origin", rsuffix="_destination")
df = df.assign(duration=df['createdat_destination'] - df['createdat_origin'])
return df
def visit_gaps(visits):
"""
:param visits:
DataFrame of visits indexed by "userid".
[userid*, ...rest]
:return:
DataFrame of gaps between visits for each user.
[userid*, ...rest_origin, ...rest_destination]
"""
def f(user_visits):
origins = user_visits.shift(1).dropna().astype(visits.dtypes.to_dict()).reset_index(drop=True)
destinations = user_visits.shift(-1).dropna().astype(visits.dtypes.to_dict()).reset_index(drop=True)
return origins.join(destinations, lsuffix="_origin", rsuffix="_destination")
return visits.groupby('userid').apply(f).reset_index(level=1, drop=True)
geotweet_paths = {
"sweden": os.getcwd() + "/dbs/sweden/geotweets.csv",
"sweden_infered": os.getcwd() + "/dbs/sweden/geotweets_infered.csv",
}
def read_geotweets_raw(path):
    ts = pd.read_csv(path)
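# --- Illustrative sketch (not part of the original file above) ---------------
# cluster() above runs DBSCAN per user with the haversine metric, which is why
# eps_km is divided by the Earth radius before clustering. Standalone example
# on three made-up points (two ~80 m apart, one far away):
demo_pts = pd.DataFrame({
    'userid': [1, 1, 1],
    'latitude': [59.3293, 59.3300, 55.6050],
    'longitude': [18.0686, 18.0690, 13.0038],
})
demo_labeled = cluster(demo_pts.set_index('userid'), eps_km=0.5, min_samples=1)
print(demo_labeled['region'].tolist())   # the two nearby points share a region, the third gets its own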
import asyncio
import logging
import os
from enum import Enum
from typing import List, Optional, Tuple
import pandas as pd
from aiohttp import ClientSession
from pydantic import Field
from toucan_connectors.common import get_loop
from toucan_connectors.toucan_connector import ToucanConnector, ToucanDataSource
from .constants import MAX_RUNS, PER_PAGE
from .helpers import DICTIONARY_OF_FORMATTERS, build_df, build_empty_df
BASE_ROUTE = 'https://proxy.bearer.sh/aircall_oauth'
BEARER_API_KEY = os.environ.get('BEARER_API_KEY')
async def fetch_page(
dataset: str,
data_list: List[dict],
session: ClientSession,
limit,
current_pass: int,
new_page=1,
delay_counter=0,
) -> List[dict]:
"""
Fetches data from AirCall API
dependent on existence of other pages and call limit
"""
endpoint = f'{BASE_ROUTE}/{dataset}?per_page={PER_PAGE}&page={new_page}'
data: dict = await fetch(endpoint, session)
logging.getLogger(__file__).info(
f'Request sent to Aircall for page {new_page} for dataset {dataset}'
)
aircall_error = data.get('error')
if aircall_error:
logging.getLogger(__file__).error(f'Aircall error has occurred: {aircall_error}')
delay_timer = 1
max_num_of_retries = 3
await asyncio.sleep(delay_timer)
if delay_counter < max_num_of_retries:
delay_counter += 1
logging.getLogger(__file__).info('Retrying Aircall API')
data_list = await fetch_page(
dataset, data_list, session, limit, current_pass, new_page, delay_counter
)
else:
logging.getLogger(__file__).error('Aborting Aircall requests')
raise Exception(f'Aborting Aircall requests due to {aircall_error}')
delay_counter = 0
data_list.append(data)
next_page_link = None
meta_data = data.get('meta')
if meta_data is not None:
next_page_link: Optional[str] = meta_data.get('next_page_link')
if limit > -1:
current_pass += 1
if next_page_link is not None and current_pass < limit:
next_page = meta_data['current_page'] + 1
data_list = await fetch_page(
dataset, data_list, session, limit, current_pass, next_page
)
else:
if next_page_link is not None:
next_page = meta_data['current_page'] + 1
data_list = await fetch_page(
dataset, data_list, session, limit, current_pass, next_page
)
return data_list
async def fetch(new_endpoint, session: ClientSession) -> dict:
"""The basic fetch function"""
async with session.get(new_endpoint) as res:
return await res.json()
class AircallDataset(str, Enum):
calls = 'calls'
tags = 'tags'
users = 'users'
class AircallDataSource(ToucanDataSource):
limit: int = Field(MAX_RUNS, description='Limit of entries (default is 1 run)', ge=-1)
dataset: AircallDataset = 'calls'
class AircallConnector(ToucanConnector):
"""
This is a connector for [Aircall](https://developer.aircall.io/api-references/#endpoints)
using [Bearer.sh](https://app.bearer.sh/)
"""
data_source_model: AircallDataSource
bearer_integration = 'aircall_oauth'
bearer_auth_id: str
async def _get_data(self, dataset: str, limit) -> Tuple[List[dict], List[dict]]:
"""Triggers fetches for data and does preliminary filtering process"""
headers = {'Authorization': BEARER_API_KEY, 'Bearer-Auth-Id': self.bearer_auth_id}
async with ClientSession(headers=headers) as session:
team_data, variable_data = await asyncio.gather(
fetch_page('teams', [], session, limit, 0,),
fetch_page(dataset, [], session, limit, 0,),
)
team_response_list = []
variable_response_list = []
if len(team_data) > 0:
for data in team_data:
for team_obj in data['teams']:
team_response_list += DICTIONARY_OF_FORMATTERS['teams'](team_obj)
if len(variable_data) > 0:
for data in variable_data:
variable_response_list += [
                        DICTIONARY_OF_FORMATTERS.get(dataset, DICTIONARY_OF_FORMATTERS['users'])(obj) for obj in data[dataset]
]
return team_response_list, variable_response_list
async def _get_tags(self, dataset: str, limit) -> List[dict]:
"""Triggers fetches for tags and does preliminary filtering process"""
headers = {'Authorization': BEARER_API_KEY, 'Bearer-Auth-Id': self.bearer_auth_id}
async with ClientSession(headers=headers) as session:
raw_data = await fetch_page(dataset, [], session, limit, 1,)
tags_data_list = []
for data in raw_data:
tags_data_list += data['tags']
return tags_data_list
def run_fetches(self, dataset, limit) -> Tuple[List[dict], List[dict]]:
"""sets up event loop and fetches for 'calls' and 'users' datasets"""
loop = get_loop()
future = asyncio.ensure_future(self._get_data(dataset, limit))
return loop.run_until_complete(future)
def run_fetches_for_tags(self, dataset, limit):
"""sets up event loop and fetches for 'tags' dataset"""
loop = get_loop()
future = asyncio.ensure_future(self._get_tags(dataset, limit))
return loop.run_until_complete(future)
def _retrieve_data(self, data_source: AircallDataSource) -> pd.DataFrame:
"""retrieves data from AirCall API"""
dataset = data_source.dataset
empty_df = build_empty_df(dataset)
# NOTE: no check needed on limit here because a non-valid limit
# raises a Pydantic ValidationError
limit = data_source.limit
if dataset == 'tags':
non_empty_df = | pd.DataFrame([]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from emissions.trainer import Trainer
from emissions.data import load_data, clean_data
from sklearn.metrics import precision_score
class ImpSearch():
"""
    This class is built to facilitate the analysis answering the following question:
    How different could {year} have been with our solution implemented?
    What ImpSearch does:
    1. For each param,
    it trains the model, performs the implementation analysis on the test year,
    and then collects the total pollution quantity caused that year by vehicles that failed the test.
    2. After finishing the above steps for all possible params,
    it selects the param that gave the smallest pollution quantity
    as the best param and plots the implementation outcome for that year.
    Check out notebooks/what_if_2020.ipynb for usage.
"""
cols = ['VEHICLE_AGE', 'MILE_YEAR', 'MAKE',
'MODEL_YEAR', 'ENGINE_WEIGHT_RATIO']
cat_col = ['MAKE']
def __init__(self):
"""
"""
self.df = None
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.total_tests = None
self.total_fails = None
self.year = None
self.max_depth = None
self.n_estimators = 1
self.best_depth = None
self.pollutions = None
self.total_predicted_fails = None
self.anaylsis_table = None
def load_data(self):
"""
        1. loads clean data and saves it as the class attribute self.df
2. adds counter columns for all the tests and failed tests
count_fail: 1 if the test result is fail else 0
count_test: 1 for each test
"""
df = load_data()
df = clean_data(df)
df['count_test'] = 1
df['count_fail'] = df.RESULT
self.df = df
def train_test_split(self, year):
'''
        for a given year, splits the data into train (before that year) and test (in that year)
'''
train = self.df[self.df.TEST_SDATE.dt.year < year].sort_values('TEST_SDATE')
test = self.df[self.df.TEST_SDATE.dt.year == year].sort_values('TEST_SDATE')
self.y_train = train.pop('RESULT')
self.X_train = train
self.y_test = test.pop('RESULT')
self.X_test = test
self.total_tests = self.X_test.shape[0]
self.total_fails = self.y_test.sum()
def get_estimator(self, depth):
'''
        uses the Trainer class from trainer.py to get the fitted estimator
        and prints the evaluation scores
        if you want to plot the learning curve, uncomment the last line
'''
trainer = Trainer(self.X_train[self.cols],
self.y_train,
metric='precision',
n_estimators = self.n_estimators,
with_categorical=self.cat_col,
max_depth=depth
)
trainer.grid_search()
print('\nmax_depth:', trainer.search_result.best_params_['model__max_depth'])
tmp = trainer.evaluate(self.X_test[self.cols], self.y_test)
print(tmp)
# trainer.learning_curve()
return trainer
def get_counter_table(self):
'''
        creates a counter table indexed by TEST_SDATE with the columns:
n_tests: cumulative number of tests along the year
n_fails: cumulative number of failed tests along the year
'''
df = self.X_test[['TEST_SDATE', 'count_fail', 'count_test']].copy()
df.set_index( | pd.DatetimeIndex(df.TEST_SDATE) | pandas.DatetimeIndex |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
# In[2]:
train_encoded = pd.read_csv("../data/train_store_encoded_onehot.csv")
# In[3]:
train_df = pd.read_csv("../data/train.csv")
store_df = pd.read_csv("../data/store.csv")
# In[4]:
cate_df = store_df.apply(lambda x: (x["Store"], x["StoreType"] + x["Assortment"]), axis = 1).map(lambda x: x[-1]).copy().reset_index()
cate_df.columns = ["Store", "cate"]
cate_df["Store"] = cate_df["Store"] + 1
# In[5]:
def calculate_days_num(data_df, cate_df):
import gc
data_df["Date"] = pd.to_datetime(data_df["Date"])
merge_df = pd.merge(data_df[["Date", "Store", "Sales"]], cate_df, on = "Store", how = "inner")
print("merge_df shape : {}".format(merge_df.shape))
from functools import reduce
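    # Dates present for every category: intersect the per-category date sets, then sort chronologically.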
    ordered_intersection_dates = sorted(pd.to_datetime(sorted(reduce(
        lambda a, b: a.intersection(b),
        map(lambda x: set(x.tolist()),
            merge_df.groupby("cate").apply(dict).map(lambda inner_dict: inner_dict["Date"]).values.tolist())))))
ordered_intersection_dates = | pd.Series(ordered_intersection_dates) | pandas.Series |
#!/usr/bin/env python
import pandas as pd
from app.solr import get_collections, get_connection, get_query, get_count, get_schema, set_schema
import requests
import json
DEBUG = True
if __name__ == '__main__':
DEBUG = False
if DEBUG:
pd.set_option('display.max_columns', None)
MONDAY = pd.offsets.Week(weekday=0)
def bitmap_range(date_from, date_to, this_bitstring):
range_idx = pd.date_range(date_from + MONDAY, date_to + MONDAY, freq=MONDAY)
return pd.Series(index=range_idx, data=this_bitstring)
def fill_bitmap(this_row):
inverse_row = this_row['Days'][::-1]
return bitmap_range(this_row['Date_From'], this_row['Date_To'], inverse_row)
def get_bitmaps():
from datetime import datetime
from math import ceil
#STARTYEAR = pd.offsets.YearBegin()
WEEK = pd.offsets.Week()
DAY = | pd.offsets.Day() | pandas.offsets.Day |
# coding: utf-8
# # Chart presentation (6) - Creating custom hovertext
# In the last lesson we found out how to control what, how and where the hover information is displayed on a chart.
#
# In this lesson we'll learn how to create a custom text field in a Pandas DataFrame using the <code>apply()</code> and <code>lambda</code> functions. We'll also learn how to style this custom field using some HTML tags.
#
# Setting custom hovertext gives you very fine control over how the contextual information in your chart is displayed. Doing this correctly will really make your charts stand out.
#
# This lesson has a strong focus on manipulating data in a Pandas DataFrame. We'll learn several different data manipulation techniques, but if you get stuck or don't understand something, you can ask in the comments section or email <a href="mailto:<EMAIL>"><EMAIL></a>.
# ## Module Imports
# In[2]:
#plotly.offline doesn't push your charts to the clouds
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# In[3]:
#lets us see the charts in an iPython Notebook
pyo.offline.init_notebook_mode() # run at the start of every ipython
# ### Getting the data
#
# We'll load the house price and ranks dataset from my website. This contains the average house price data for each region on the first day of each year from 1995 - 2016, as well as the rank of each region (1 being the most expensive).
#
# We're going to create a text column for each Region which will contain the hovertext that will be displayed on the chart. This text field will contain the Region's name in bold, then on the next line, the average price for that year (formatted as £), and on the final line, the region's house price rank in italics.
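# Before we load the real data, here's a minimal sketch of that pattern on a tiny made-up DataFrame (the column names below are placeholders, not the ones in the CSV):
# In[ ]:
demo = pd.DataFrame({'Region': ['A', 'B'], 'Price': [250000, 175000], 'Rank': [1, 2]})
demo['text'] = demo.apply(lambda row: "<b>{}</b><br>£{:,}<br><i>Rank: {}</i>".format(row['Region'], row['Price'], row['Rank']), axis=1)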
# In[38]:
housePrices = | pd.read_csv("http://www.richard-muir.com/data/public/csv/RegionalHousePricesAndRanksJan16.csv") | pandas.read_csv |
"""LogToDataFrame: Converts a Zeek log to a Pandas DataFrame"""
# Third Party
import pandas as pd
# Local
from zat import zeek_log_reader
class LogToDataFrame(object):
"""LogToDataFrame: Converts a Zeek log to a Pandas DataFrame
Notes:
This class has recently been overhauled from a simple loader to a more
complex class that should in theory:
- Select better types for each column
- Should be faster
- Produce smaller memory footprint dataframes
If you have any issues/problems with this class please submit a GitHub issue.
More Info: https://supercowpowers.github.io/zat/large_dataframes.html
"""
def __init__(self):
"""Initialize the LogToDataFrame class"""
# First Level Type Mapping
# This map defines the types used when first reading in the Zeek log into a 'chunk' dataframes.
# Types (like time and interval) will be defined as one type at first but then
# will undergo further processing to produce correct types with correct values.
# See: https://stackoverflow.com/questions/29245848/what-are-all-the-dtypes-that-pandas-recognizes
# for more info on supported types.
self.type_map = {'bool': 'category', # Can't hold NaN values in 'bool', so we're going to use category
'count': 'UInt64',
'int': 'Int32',
'double': 'float',
'time': 'float', # Secondary Processing into datetime
'interval': 'float', # Secondary processing into timedelta
'port': 'UInt16'
}
def _get_field_info(self, log_filename):
"""Internal Method: Use ZAT log reader to read header for names and types"""
_zeek_reader = zeek_log_reader.ZeekLogReader(log_filename)
_, field_names, field_types, _ = _zeek_reader._parse_zeek_header(log_filename)
return field_names, field_types
def _create_initial_df(self, log_filename, all_fields, usecols, dtypes):
"""Internal Method: Create the initial dataframes by using Pandas read CSV (primary types correct)"""
return pd.read_csv(log_filename, sep='\t', names=all_fields, usecols=usecols, dtype=dtypes, comment="#", na_values='-')
def create_dataframe(self, log_filename, ts_index=True, aggressive_category=True, usecols=None):
""" Create a Pandas dataframe from a Bro/Zeek log file
Args:
                log_filename (string): The full path to the Zeek log
ts_index (bool): Set the index to the 'ts' field (default = True)
aggressive_category (bool): convert unknown columns to category (default = True)
                usecols (list): A subset of columns to read in (minimizes memory usage) (default = None)
"""
# Grab the field information
field_names, field_types = self._get_field_info(log_filename)
all_fields = field_names # We need ALL the fields for later
# If usecols is set then we'll subset the fields and types
if usecols:
# Usecols needs to include ts
if 'ts' not in usecols:
usecols.append('ts')
field_types = [t for t, field in zip(field_types, field_names) if field in usecols]
field_names = [field for field in field_names if field in usecols]
# Get the appropriate types for the Pandas Dataframe
pandas_types = self.pd_column_types(field_names, field_types, aggressive_category)
# Now actually read in the initial dataframe
self._df = self._create_initial_df(log_filename, all_fields, usecols, pandas_types)
# Now we convert 'time' and 'interval' fields to datetime and timedelta respectively
for name, zeek_type in zip(field_names, field_types):
if zeek_type == 'time':
self._df[name] = | pd.to_datetime(self._df[name], unit='s') | pandas.to_datetime |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import interpolate
import pickle # to serialise objects
from scipy import stats
import seaborn as sns
from sklearn import metrics
from sklearn.model_selection import train_test_split
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
RANDOM_SEED = 42
dataset_train = pd.read_csv('final_training_set_8people.csv')
training_set = pd.DataFrame(dataset_train.iloc[:,:].values)
training_set.columns = ["User","Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
X = training_set.iloc[:, 3]
X = X.astype(float)
X = (X*1000000).astype('int64')
Y = training_set.iloc[:, 4]
Y = Y.astype(float)
Y = (Y*1000000).astype('int64')
Z = training_set.iloc[:, 5]
Z = Z.astype(float)
Z = (Z*1000000).astype('int64')
Old_T = (training_set.iloc[:, 2]).astype(float)
Old_T = (Old_T * 1000000)
Old_T = Old_T.astype('int64')
New_T = np.arange(0, 12509996000, 50000)
New_T = New_T.astype('int64')
# find interpolation function
interpolate_function = interpolate.interp1d(Old_T, X, axis = 0, fill_value="extrapolate")
X_Final = interpolate_function((New_T))
interpolate_function = interpolate.interp1d(Old_T, Y, axis = 0, fill_value="extrapolate")
Y_Final = interpolate_function((New_T))
interpolate_function = interpolate.interp1d(Old_T, Z, axis = 0, fill_value="extrapolate")
Z_Final = interpolate_function((New_T))
#Combining data into one pandas dataframe
Dataset = pd.DataFrame()
Dataset['X_Final'] = X_Final
Dataset['Y_Final'] = Y_Final
Dataset['Z_Final'] = Z_Final
Dataset['New_Timeframe'] = New_T
Dataset = Dataset/1e6
Dataset = Dataset[['New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']]
Dataset['New_Activity'] = ""
#Dataset = Dataset.astype('int64')
Dataset = Dataset[['New_Activity', 'New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']]
#function to fill in new dataset with related activity
Dataset = Dataset.to_numpy()
training_set = training_set.to_numpy()
time = 0
temp = training_set[0][1]
var_to_assign = ""
last_row = 0
new_row = 0
for i in range(len(training_set)-1):
if(training_set[i][1] == temp):
continue
if (training_set[i][1] != temp):
var_to_assign = temp
temp = training_set[i][1]
time = training_set[i][2]
a1 = [x for x in Dataset[:, 1] if x <= time]
new_row = len(a1)
Dataset[last_row:new_row+1, 0] = var_to_assign
last_row = new_row
continue
#converting both arrays back to Dataframes
Dataset = pd.DataFrame(Dataset)
Dataset.columns = ['New_Activity', 'New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']
training_set = pd.DataFrame(training_set)
training_set.columns = ["User","Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
#Filling empty Dataset values
#Checking to see which index values are empty
df_missing = pd.DataFrame()
df_missing = Dataset[Dataset.isnull().any(axis=1)]
#Filling all empty values with preceding values
Dataset['New_Activity'].fillna(method = 'ffill', inplace = True)
#Combining smaller classes into larger/main classes
Dataset = Dataset.to_numpy()
for i in range(0, len(Dataset)-1):
if Dataset[i][0] == "a_loadwalk" or Dataset[i][0] == "a_jump":
Dataset[i][0] = "a_walk"
if Dataset[i][0] == "p_squat" or Dataset[i][0] == "p_kneel" or Dataset[i][0] == "p_lie" or Dataset[i][0] == "t_lie_sit" or Dataset[i][0] == "t_sit_lie" or Dataset[i][0] == "t_sit_stand":
Dataset[i][0] = "p_sit"
if Dataset[i][0] == "p_bent" or Dataset[i][0] == "t_bend" or Dataset[i][0] == "t_kneel_stand" or Dataset[i][0] == "t_stand_kneel" or Dataset[i][0] == "t_stand_sit" or Dataset[i][0] == "t_straighten" or Dataset[i][0] == "t_turn":
Dataset[i][0] = "p_stand"
Dataset = pd.DataFrame(Dataset)
Dataset.columns = ['New_Activity', 'New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']
#Feature Generation and Data Transformation
TIME_STEPS = 200
N_FEATURES = 3
STEP = 20
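# Sliding-window segmentation: each window holds TIME_STEPS (200) samples, and the start
# of the window advances by STEP (20) samples, so consecutive windows overlap heavily.
# Each window is labelled with the modal activity observed inside it.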
segments = []
labels = []
for i in range(0, len(Dataset) - TIME_STEPS, STEP): #To give the starting point of each batch
xs = Dataset['X_Final'].values[i: i + TIME_STEPS]
ys = Dataset['Y_Final'].values[i: i + TIME_STEPS]
zs = Dataset['Z_Final'].values[i: i + TIME_STEPS]
label = stats.mode(Dataset['New_Activity'][i: i + TIME_STEPS]) #this statement returns mode and count
    label = label[0][0] # to get the value of the mode
segments.append([xs, ys, zs])
labels.append(label)
#reshaping our data
reshaped_segments = np.asarray(segments, dtype = np.float32).reshape(-1, TIME_STEPS, N_FEATURES)
#reshaped_segments.shape
#Using one hot encoding
l = pd.DataFrame(labels)
l_one_hot = | pd.get_dummies(l) | pandas.get_dummies |
import datetime
import os
import sys
import tkinter as tk
import warnings
from tkinter import filedialog, messagebox
import ipywidgets as widgets
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ipywidgets import Button, HBox, Layout, VBox
sys.path.insert(0, os.path.join(os.path.dirname(os.getcwd()), 'scripts'))
from windroses import *
warnings.simplefilter("ignore")
class WidgetsMain(object):
def __init__(self):
self.path = os.path.dirname(os.getcwd())
def display(self):
create_project_button = widgets.Button(description='Criar projeto', tooltip='Cria um novo projeto', layout=Layout(
width='30%'), style={'description_width': 'initial'})
create_project_button.on_click(self.create_project_button_click)
load_project_button = widgets.Button(description='Importar projeto', tooltip='Importa o .csv de um projeto criado', layout=Layout(
width='30%'), style={'description_width': 'initial'})
load_project_button.on_click(self.load_project_button_click)
project_accordion = widgets.Accordion(
children=[create_project_button, load_project_button])
project_accordion.set_title(0, 'Criar projeto')
project_accordion.set_title(1, 'Importar projeto')
tab_contents = ['Projetos']
tab_children = [project_accordion]
tab = widgets.Tab()
tab.children = tab_children
for i in range(len(tab_children)):
tab.set_title(i, tab_contents[i])
return tab
def create_project_button_click(self, b):
self.project_dirs = self.create_project()
return self.project_dirs
def create_project(self):
if not os.path.exists(os.path.join(self.path, 'proj')):
os.makedirs(os.path.join(self.path, 'proj'))
sys._enablelegacywindowsfsencoding()
root = tk.Tk()
root.call('wm', 'attributes', '.', '-topmost', '1')
root.withdraw()
root.iconbitmap(os.path.join(self.path, 'logo.ico'))
root.update_idletasks()
create_project_asksaveasfilename_dir = filedialog.asksaveasfilename(initialdir=os.path.join(
self.path, 'proj'), title="Insira o nome desejado para seu projeto:", filetypes=[("Nome do projeto", ".")])
if create_project_asksaveasfilename_dir == '':
messagebox.showwarning("ondisapy", "Nenhum projeto criado.")
return None
else:
if not os.path.exists(create_project_asksaveasfilename_dir):
os.makedirs(create_project_asksaveasfilename_dir)
project_data_dir = (os.path.join(
create_project_asksaveasfilename_dir, 'data').replace('\\', '/'))
project_waves_dir = (os.path.join(
project_data_dir, 'wind_waves').replace('\\', '/'))
project_winds_dir = (os.path.join(
project_data_dir, 'wind_data').replace('\\', '/'))
project_wind_fetchs_dir = (os.path.join(
project_data_dir, 'wind_fetchs').replace('\\', '/'))
project_img_dir = (os.path.join(
create_project_asksaveasfilename_dir, 'img').replace('\\', '/'))
project_grid_dir = (os.path.join(
create_project_asksaveasfilename_dir, 'grid').replace('\\', '/'))
project_dirs_list = [project_data_dir, project_waves_dir, project_winds_dir,
project_wind_fetchs_dir, project_img_dir, project_grid_dir]
print("Diretórios de projeto criados:")
for i in project_dirs_list:
try:
os.makedirs(i)
print("%s" % i)
except OSError as Error:
if os.path.exists(i):
print("%s já existe." % i)
project_file_dir = (os.path.join(
create_project_asksaveasfilename_dir, 'dir.csv').replace('\\', '/'))
if not os.path.exists(project_file_dir):
project_name = os.path.basename(
create_project_asksaveasfilename_dir)
project_dirs_list.append(project_name)
project_dirs_dataframe = pd.DataFrame(
data={"dir": project_dirs_list})
project_dirs_dataframe.to_csv(
project_file_dir, sep='\t', index=False, header=True, encoding='utf-8')
messagebox.showinfo(
"ondisapy", "Projeto criado com sucesso:\n%s" % project_file_dir)
print("\nProjeto criado:\n%s\n" % project_file_dir)
return project_dirs_dataframe
else:
print("%s já existe.\n" % project_file_dir)
print("\n")
def load_project_button_click(self, b):
self.project_dirs = self.load_project()
return self.project_dirs
def load_project(self):
sys._enablelegacywindowsfsencoding()
root = tk.Tk()
root.call('wm', 'attributes', '.', '-topmost', '1')
root.withdraw()
root.iconbitmap(os.path.join(self.path, 'logo.ico'))
root.update_idletasks()
load_project_askopenfilename_dir = filedialog.askopenfilename(initialdir=os.path.join(
self.path, 'proj'), title="Confirme o diretório de importação do arquivo '.csv' do seu projeto:", filetypes=[(".csv", "*.csv")])
if load_project_askopenfilename_dir == '':
messagebox.showwarning("ondisapy", "Nenhum projeto importado.")
return None
else:
if not ('dir.csv') in str(load_project_askopenfilename_dir):
messagebox.showwarning(
"ondisapy", "Erro: arquivo inválido.\nO arquivo realmente é um .csv de projeto criado?")
return None
else:
project_dirs_dataframe = pd.read_csv(
load_project_askopenfilename_dir, sep='\t', engine='python', header=0, encoding='utf-8')
messagebox.showinfo(
"ondisapy", "Projeto importado com sucesso:\n%s" % load_project_askopenfilename_dir)
print("Projeto importado:\n%s\n" %
load_project_askopenfilename_dir)
return (project_dirs_dataframe)
class WidgetsWindData(object):
def __init__(self):
self.path = os.path.dirname(os.getcwd())
def display(self):
load_csat3_wind_data_button = widgets.Button(description='Importar modelo de dados de ventos CSAT3',
tooltip='Importa um modelo de dados de ventos CSAT3 para leitura', layout=Layout(width='30%'), style={'description_width': 'initial'})
load_csat3_wind_data_button.on_click(
self.load_csat3_wind_data_button_click)
load_windsonic_wind_data_button = widgets.Button(description='Importar modelo de dados de ventos Windsonic',
tooltip='Importa um modelo de dados de ventos Windsonic para leitura', layout=Layout(width='30%'), style={'description_width': 'initial'})
load_windsonic_wind_data_button.on_click(
self.load_windsonic_wind_data_button_click)
self.height_adjustment_checkbox = widgets.Checkbox(
description='Ajustar alturas (Soma vetorial)', value=False, layout=Layout(width='30%'), style={'description_width': 'initial'})
self.rl_checkbox = widgets.Checkbox(description='Utilizar RL', value=False, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.rt_checkbox = widgets.Checkbox(description='Utilizar RT', value=False, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.uz_checkbox = widgets.Checkbox(description='Utilizar U(z) (CSAT3)', value=False, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.bins_int_text = widgets.IntText(description='Intervalos:', value=10, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.step_int_text = widgets.IntText(description='Redutor:', value=1, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.speed_unit_text = widgets.Text(
description='Unidade (m/s):', value='m/s', layout=Layout(width='30%'), style={'description_width': 'initial'})
self.windrose_percentage_angle_float_text = widgets.FloatText(
description='Ângulo (°):', value=33.75, layout=Layout(width='30%'), style={'description_width': 'initial'})
wind_data_accordion = widgets.Accordion(
children=[load_csat3_wind_data_button, load_windsonic_wind_data_button])
wind_data_accordion.set_title(
0, 'Importar modelo de dados de ventos CSAT3')
wind_data_accordion.set_title(
1, 'Importar modelo dados de ventos Windsonic')
wind_adjustments_vbox = widgets.VBox(
[self.height_adjustment_checkbox, self.rl_checkbox, self.rt_checkbox, self.uz_checkbox])
wind_adjustments_accordion = widgets.Accordion(
children=[wind_adjustments_vbox])
wind_adjustments_accordion.set_title(
0, 'Ajustes a serem incluídos nos cálculos de velocidades processadas')
other_adjustments_accordion = widgets.Accordion(
children=[self.windrose_percentage_angle_float_text, self.bins_int_text, self.step_int_text, self.speed_unit_text])
other_adjustments_accordion.set_title(
0, 'Ângulo para a rosa dos ventos')
other_adjustments_accordion.set_title(1, 'Intervalos')
other_adjustments_accordion.set_title(2, 'Amostragem de dados')
other_adjustments_accordion.set_title(3, 'Unidade de velocidade')
tab_contents = ['Dados de Ventos',
'Ajustes de Cálculo', 'Outros Ajustes']
tab_children = [wind_data_accordion,
wind_adjustments_accordion, other_adjustments_accordion]
tab = widgets.Tab()
tab.children = tab_children
for i in range(len(tab_children)):
tab.set_title(i, tab_contents[i])
display(tab)
def load_csat3_wind_data_button_click(self, b):
self.csat3_wind_data = self.load_csat3_wind_data()
def load_csat3_wind_data(self):
sys._enablelegacywindowsfsencoding()
root = tk.Tk()
root.call('wm', 'attributes', '.', '-topmost', '1')
root.withdraw()
root.iconbitmap(os.path.join(self.path, 'logo.ico'))
root.update_idletasks()
load_csat3_askopenfilename_dir = filedialog.askopenfilename(
initialdir=self.path, title="Confirme o diretório de importação do arquivo '.csv' do seu modelo de dados de ventos CSAT3:", filetypes=[(".csv", "*.csv")])
if load_csat3_askopenfilename_dir == '':
messagebox.showwarning(
"ondisapy", "Nenhum modelo de dados de ventos CSAT3 importado.")
return None
else:
csat3_dataframe = pd.read_csv(
load_csat3_askopenfilename_dir, sep=';', engine='python', encoding='utf-8', decimal=',')
messagebox.showinfo(
"ondisapy", "Modelo de dados de ventos CSAT3 importado com sucesso:\n%s" % load_csat3_askopenfilename_dir)
print("Modelo de dados de ventos CSAT3 importado:\n%s\n" %
load_csat3_askopenfilename_dir)
return csat3_dataframe
def csat3_wind_data_dataframe(self, csat3_dataframe, project_dirs):
self.csat3_dataframe = csat3_dataframe.copy()
self.project_dirs = project_dirs
if len(self.csat3_dataframe.filter(regex='Unnamed').columns) != 0:
self.csat3_dataframe = self.csat3_dataframe[self.csat3_dataframe.columns.drop(
list(self.csat3_dataframe.filter(regex='Unnamed')))]
if False in self.csat3_dataframe.columns.isin(['TimeStamp', 'Ux', 'Uy', 'Uz', 'Ts', 'batt_volt', 'panel_temp', 'wnd_dir_csat3', 'wnd_dir_compass', 'height_measurement', 'RL', 'RT']):
messagebox.showwarning(
"ondisapy", "Modelo de dados de ventos CSAT3 com colunas nomeadas de forma diferente do modelo fornecido para uso.\nVerifique se seu arquivo .csv é proveniente do modelo correto para prosseguir com as análises.")
return None
else:
self.csat3_dataframe[['Ux', 'Uy', 'Uz', 'Ts', 'batt_volt', 'panel_temp', 'wnd_dir_csat3', 'wnd_dir_compass', 'height_measurement', 'RL', 'RT']] = self.csat3_dataframe[[
'Ux', 'Uy', 'Uz', 'Ts', 'batt_volt', 'panel_temp', 'wnd_dir_csat3', 'wnd_dir_compass', 'height_measurement', 'RL', 'RT']].astype('float64')
csat3_dataframe_len = len(self.csat3_dataframe)
self.csat3_dataframe = self.csat3_dataframe.dropna(
subset=['TimeStamp', 'Ux', 'Uy', 'Uz', 'Ts', 'batt_volt', 'panel_temp', 'wnd_dir_csat3', 'wnd_dir_compass', 'height_measurement'])
self.csat3_dataframe = self.csat3_dataframe.fillna(value='')
csat3_dataframe_drop_na_len = len(self.csat3_dataframe)
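            # Processed speed U: vector sum of the sonic components (Ux, Uy, and Uz when the
            # U(z) option is checked), optionally scaled to a 10 m reference height via the
            # 1/7 power-law profile and multiplied by the RL/RT correction factors.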
if self.uz_checkbox.value == False:
if self.height_adjustment_checkbox.value == True:
processed_wind_speeds_list = [((((float(self.csat3_dataframe['Ux'][i]))**2)+((float(self.csat3_dataframe['Uy'][i]))**2))**(
0.5))*((10/self.csat3_dataframe['height_measurement'][i])**(1/7)) for i in self.csat3_dataframe.index]
else:
processed_wind_speeds_list = [((((float(self.csat3_dataframe['Ux'][i]))**2)+(
(float(self.csat3_dataframe['Uy'][i]))**2))**(0.5)) for i in self.csat3_dataframe.index]
if self.rl_checkbox.value == True:
processed_wind_speeds_list = [
i*self.csat3_dataframe['RL'][0] for i in processed_wind_speeds_list]
if self.rt_checkbox.value == True:
processed_wind_speeds_list = [
i*self.csat3_dataframe['RT'][0] for i in processed_wind_speeds_list]
self.csat3_dataframe['U'] = pd.Series(
processed_wind_speeds_list).values
self.csat3_dataframe['TimeStamp'] = pd.to_datetime(
self.csat3_dataframe['TimeStamp'])
print("Total de linhas sem valores utilizáveis removidas: %i de %i.\n" % (
csat3_dataframe_len-csat3_dataframe_drop_na_len, csat3_dataframe_len))
self.csat3_dataframe = self.csat3_dataframe.iloc[::self.step_int_text.value]
self.csat3_dataframe.reset_index(inplace=True, drop=True)
self.csat3_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_csat3'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(self.csat3_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_csat3'+'.csv').replace('\\', '/').replace('\\', '/'))
return self.csat3_dataframe
elif self.uz_checkbox.value == True:
if self.height_adjustment_checkbox.value == True:
processed_wind_speeds_list = [((((float(self.csat3_dataframe['Ux'][i]))**2)+((float(self.csat3_dataframe['Uy'][i]))**2)+((float(
self.csat3_dataframe['Uz'][i]))**2))**(0.5))*((10/self.csat3_dataframe['height_measurement'][i])**(1/7)) for i in self.csat3_dataframe.index]
else:
processed_wind_speeds_list = [((((float(self.csat3_dataframe['Ux'][i]))**2)+((float(self.csat3_dataframe['Uy'][i]))**2)+(
(float(self.csat3_dataframe['Uz'][i]))**2))**(0.5)) for i in self.csat3_dataframe.index]
if self.rl_checkbox.value == True:
processed_wind_speeds_list = [
i*self.csat3_dataframe['RL'][0] for i in processed_wind_speeds_list]
if self.rt_checkbox.value == True:
processed_wind_speeds_list = [
i*self.csat3_dataframe['RT'][0] for i in processed_wind_speeds_list]
self.csat3_dataframe['U'] = pd.Series(
processed_wind_speeds_list).values
self.csat3_dataframe['TimeStamp'] = pd.to_datetime(
self.csat3_dataframe['TimeStamp'])
print("Total de linhas sem valores utilizáveis removidas: %i de %i." % (
csat3_dataframe_len-csat3_dataframe_drop_na_len, csat3_dataframe_len))
self.csat3_dataframe = self.csat3_dataframe.iloc[::self.step_int_text.value]
self.csat3_dataframe.reset_index(inplace=True, drop=True)
self.csat3_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_csat3'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(self.csat3_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_csat3'+'.csv').replace('\\', '/'))
return self.csat3_dataframe
def csat3_wind_data_windrose(self, csat3_dataframe, project_dirs):
self.csat3_dataframe = csat3_dataframe
self.project_dirs = project_dirs
figure = plt.figure(figsize=(12, 12))
axes = figure.add_axes([0, 0, 1, 1])
axes.set_visible(False)
csat3_windrose_dataframe = pd.DataFrame({'speed': pd.to_numeric(
self.csat3_dataframe['U']), 'direction': pd.to_numeric(self.csat3_dataframe['wnd_dir_compass'])})
axes = WindroseAxes.from_ax(fig=figure)
axes.radii_angle = self.windrose_percentage_angle_float_text.value
axes.bar(csat3_windrose_dataframe['direction'], csat3_windrose_dataframe['speed'],
normed=True, bins=self.bins_int_text.value, opening=0.7, edgecolor='white')
legend_title = ('Velocidades (%s)') % self.speed_unit_text.value
axes.legend(bbox_to_anchor=(1.3, 1), loc=1, title=legend_title)
axes.grid(linewidth=0.5, antialiased=True)
csat3_windrose_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(csat3_windrose_outputs_dir)
except OSError as Error:
if os.path.exists(csat3_windrose_outputs_dir):
pass
figure.savefig(os.path.join(csat3_windrose_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+'_windrose_csat3'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight")
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(csat3_windrose_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+'_windrose_csat3'+'.png').replace('\\', '/'))
return(figure, axes)
def csat3_wind_frequencies(self, csat3_windrose, project_dirs):
self.csat3_windrose = csat3_windrose
self.project_dirs = project_dirs
windrose_table = self.csat3_windrose[1]._info['table']
windrose_frequencies = np.sum(windrose_table, axis=0)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
figure = plt.figure(figsize=(9, 9))
axes = figure.add_axes([0, 0, 1, 1])
plt.ylabel('Frequências percentuais (%)')
plt.xlabel('Direção (°)')
axes.bar(np.arange(16), windrose_frequencies, align='center',
tick_label=windrose_labels, facecolor='limegreen', zorder=3)
axes_ticks = axes.get_yticks()
axes.set_yticklabels(['{:.1f}%'.format(value) for value in axes_ticks])
axes.grid(axis='y', zorder=0, linestyle='-', color='grey',
linewidth=0.5, antialiased=True, alpha=0.5)
csat3_wind_frequencies_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(csat3_wind_frequencies_outputs_dir)
except OSError as Error:
if os.path.exists(csat3_wind_frequencies_outputs_dir):
pass
figure.savefig(os.path.join(csat3_wind_frequencies_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+'_wind_frequencies_csat3'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight")
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(csat3_wind_frequencies_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_frequencies_csat3'+'.png').replace('\\', '/'))
def csat3_wind_stats(self, csat3_dataframe, csat3_windrose, project_dirs):
self.csat3_dataframe = csat3_dataframe
self.csat3_windrose = csat3_windrose
self.project_dirs = project_dirs
windrose_directions_array = np.array(
self.csat3_windrose[1]._info['dir'])
windrose_directions_array = np.delete(windrose_directions_array, 0)
windrose_directions_array = np.append(
windrose_directions_array, 348.75)
windrose_directions_list = []
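        # The N sector wraps around 0°, so it is assembled from the 348.75°–360° and 0°–11.25° slices.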
windrose_first_north_direction_split = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
348.75, 360)]['U']
windrose_second_north_direction_split = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
0, 11.25)]['U']
windrose_north_direction = pd.concat(
[windrose_first_north_direction_split, windrose_second_north_direction_split], axis=0)
windrose_directions_list.append([len(windrose_north_direction), windrose_north_direction.mean(
), windrose_north_direction.std(), windrose_north_direction.min(), windrose_north_direction.max()])
for i, j in zip(windrose_directions_array[:-1], windrose_directions_array[1:]):
sample_size = len(
self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(i, j)]['U'])
mean = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U'].mean()
std = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U'].std()
mininum = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U'].min()
maximum = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U'].max()
windrose_directions_list.append(
[sample_size, mean, std, mininum, maximum])
wind_stats_directions_dataframe = pd.DataFrame(
windrose_directions_list)
windrose_table = self.csat3_windrose[1]._info['table']
windrose_frequencies = np.sum(windrose_table, axis=0)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
wind_stats_directions_dataframe['direction'] = windrose_labels
wind_stats_directions_dataframe['frequency'] = windrose_frequencies
wind_stats_directions_dataframe = wind_stats_directions_dataframe.round(
decimals=2)
wind_stats_directions_dataframe = wind_stats_directions_dataframe.rename(
columns={0: 'sample_size', 1: 'mean', 2: 'std', 3: 'min', 4: 'max'})
wind_stats_directions_dataframe = wind_stats_directions_dataframe[[
'direction', 'sample_size', 'frequency', 'mean', 'std', 'min', 'max']]
wind_stats_directions_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_wind_stats_csat3'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(wind_stats_directions_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_wind_stats_csat3'+'.csv').replace('\\', '/'))
def csat3_wind_bins(self, csat3_dataframe, csat3_windrose, project_dirs):
self.csat3_dataframe = csat3_dataframe
self.csat3_windrose = csat3_windrose
self.project_dirs = project_dirs
windrose_directions_array = np.array(
self.csat3_windrose[1]._info['dir'])
windrose_directions_array = np.delete(windrose_directions_array, 0)
windrose_directions_array = np.append(
windrose_directions_array, 348.75)
windrose_directions_list = []
windrose_first_north_direction_split = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
348.75, 360)]['U']
windrose_second_north_direction_split = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
0, 11.25)]['U']
windrose_north_direction = pd.concat(
[windrose_first_north_direction_split, windrose_second_north_direction_split], axis=0)
windrose_directions_list.append(windrose_north_direction)
for i, j in zip(windrose_directions_array[:-1], windrose_directions_array[1:]):
windrose_direction_speeds = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U']
windrose_directions_list.append(windrose_direction_speeds)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
windrose_directions_dict = {
windrose_labels[i]: windrose_directions_list[i] for i in range(0, len(windrose_labels))}
for i, j in windrose_directions_dict.items():
figure = plt.figure(figsize=(9, 9))
axes = figure.add_axes([0, 0, 1, 1])
windrose_bins = self.csat3_windrose[1]._info['bins']
windrose_formatted_bins = []
for k in range(0, len(windrose_bins[:-2])):
windrose_bins_interval = str(
'%.1f'+' – '+'%.1f') % (windrose_bins[k], windrose_bins[k+1])
windrose_formatted_bins.append(windrose_bins_interval)
windrose_last_bin = str('≧ '+'%.1f') % windrose_bins[-2]
windrose_formatted_bins.append(windrose_last_bin)
windrose_direction_speeds_dataframe = pd.DataFrame(j)
windrose_direction_speeds_dataframe = windrose_direction_speeds_dataframe.groupby(pd.cut(
windrose_direction_speeds_dataframe['U'], bins=windrose_bins, labels=windrose_formatted_bins, right=False)).count()
windrose_direction_speeds_dataframe['%'] = [
(k/sum(windrose_direction_speeds_dataframe['U']))*100 for k in windrose_direction_speeds_dataframe['U']]
windrose_direction_speeds_dataframe['%'].plot(
ax=axes, kind='bar', legend=False, colormap=None)
axes.set_title('Direção %s' % i)
axes.set_xlabel('Intervalos (%s)' % self.speed_unit_text.value)
axes.set_ylabel('Porcentagem (%)')
axes.autoscale(enable=True, axis='x', tight=None)
for k in axes.get_xticklabels():
k.set_rotation(45)
bins_title = str('_wind_bins_%s' % i)
csat3_wind_bins_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(csat3_wind_bins_outputs_dir)
except OSError as Error:
if os.path.exists(csat3_wind_bins_outputs_dir):
pass
figure.savefig(os.path.join(csat3_wind_bins_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+bins_title+'_csat3'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight", format='png')
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(csat3_wind_bins_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+bins_title+'_csat3'+'.png').replace('\\', '/'))
def load_windsonic_wind_data_button_click(self, b):
self.windsonic_wind_data = self.load_windsonic_wind_data()
def load_windsonic_wind_data(self):
sys._enablelegacywindowsfsencoding()
root = tk.Tk()
root.call('wm', 'attributes', '.', '-topmost', '1')
root.withdraw()
root.iconbitmap(os.path.join(self.path, 'logo.ico'))
root.update_idletasks()
load_windsonic_askopenfilename_dir = filedialog.askopenfilename(
initialdir=self.path, title="Confirme o diretório de importação do arquivo '.csv' do seu modelo de dados de ventos Windsonic:", filetypes=[(".csv", "*.csv")])
if load_windsonic_askopenfilename_dir == '':
messagebox.showwarning(
"ondisapy", "Nenhum modelo de dados de ventos Windsonic importado.")
return None
else:
windsonic_dataframe = pd.read_csv(
load_windsonic_askopenfilename_dir, sep=';', engine='python', encoding='utf-8', decimal=',')
messagebox.showinfo(
"ondisapy", "Modelo de dados de ventos Windsonic importado com sucesso:\n%s" % load_windsonic_askopenfilename_dir)
print("Modelo Windsonic importado:\n%s\n" %
load_windsonic_askopenfilename_dir)
return windsonic_dataframe
def windsonic_wind_data_dataframe(self, windsonic_dataframe, project_dirs):
self.windsonic_dataframe = windsonic_dataframe.copy()
self.project_dirs = project_dirs
if len(self.windsonic_dataframe.filter(regex='Unnamed').columns) != 0:
self.windsonic_dataframe = self.windsonic_dataframe[self.windsonic_dataframe.columns.drop(
list(self.windsonic_dataframe.filter(regex='Unnamed')))]
if False in self.windsonic_dataframe.columns.isin(['TIMESTAMP', 'mean_wind_speed', 'mean_wind_direction', 'height_measurement', 'RL', 'RT']):
messagebox.showwarning(
"ondisapy", "Arquivo de dados de vento com colunas nomeadas de forma diferente do modelo fornecido para uso.\nVerifique se seu arquivo .csv é proveniente do modelo correto para prosseguir com as análises.")
return None
else:
self.windsonic_dataframe[['mean_wind_speed', 'mean_wind_direction', 'height_measurement', 'RL', 'RT']] = self.windsonic_dataframe[[
'mean_wind_speed', 'mean_wind_direction', 'height_measurement', 'RL', 'RT']].astype('float64')
windsonic_dataframe_len = len(self.windsonic_dataframe)
self.windsonic_dataframe = self.windsonic_dataframe.dropna(
subset=['TIMESTAMP', 'mean_wind_speed', 'mean_wind_direction', 'height_measurement'])
self.windsonic_dataframe = self.windsonic_dataframe.fillna(
value='')
windsonic_dataframe_drop_na_len = len(self.windsonic_dataframe)
if self.height_adjustment_checkbox.value == True:
processed_wind_speeds_list = [float(self.windsonic_dataframe['mean_wind_speed'][i]*(
(10/self.windsonic_dataframe['height_measurement'][i])**(1/7))) for i in self.windsonic_dataframe.index]
else:
processed_wind_speeds_list = [float(
self.windsonic_dataframe['mean_wind_speed'][i]) for i in self.windsonic_dataframe.index]
if self.rl_checkbox.value == True:
processed_wind_speeds_list = [
i*self.windsonic_dataframe['RL'][0] for i in processed_wind_speeds_list]
if self.rt_checkbox.value == True:
processed_wind_speeds_list = [
i*self.windsonic_dataframe['RT'][0] for i in processed_wind_speeds_list]
self.windsonic_dataframe['U'] = pd.Series(
processed_wind_speeds_list).values
self.windsonic_dataframe['TIMESTAMP'] = pd.to_datetime(
self.windsonic_dataframe['TIMESTAMP'])
print("Total de linhas sem valores utilizáveis removidas: %i de %i.\n" % (
windsonic_dataframe_len-windsonic_dataframe_drop_na_len, windsonic_dataframe_len))
self.windsonic_dataframe = self.windsonic_dataframe.iloc[::self.step_int_text.value]
self.windsonic_dataframe.reset_index(inplace=True, drop=True)
self.windsonic_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_windsonic'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(self.windsonic_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_windsonic'+'.csv').replace('\\', '/'))
return self.windsonic_dataframe
def windsonic_wind_data_windrose(self, windsonic_dataframe, project_dirs):
self.windsonic_dataframe = windsonic_dataframe
self.project_dirs = project_dirs
figure = plt.figure(figsize=(12, 12))
axes = figure.add_axes([0, 0, 1, 1])
axes.set_visible(False)
windsonic_windrose_dataframe = pd.DataFrame({'speed': pd.to_numeric(
self.windsonic_dataframe['U']), 'direction': | pd.to_numeric(self.windsonic_dataframe['mean_wind_direction']) | pandas.to_numeric |
import pandas as pd
from datacollection.models import Event, URL, CustomSession
from django_pandas.io import read_frame
import numpy as np
import json
import hashlib
import collections
from datetime import datetime
from datetime import timedelta
from collections import OrderedDict
from math import nan
import copy
pd.options.mode.chained_assignment = None # default='warn'
def sequenceWithinPuzzlesForMisconceptions(dataEvents, group = 'all'):
tutorialList = ['1. One Box', '2. Separated Boxes', '3. Rotate a Pyramid', '4. Match Silhouettes', '5. Removing Objects', '6. Stretch a Ramp', '7. Max 2 Boxes', '8. Combine 2 Ramps', '9. Scaling Round Objects']
#Remove SandBox and tutorial levels.
dataEvents['group'] = [json.loads(x)['group'] if 'group' in json.loads(x).keys() else '' for x in dataEvents['data']]
dataEvents['user'] = [json.loads(x)['user'] if 'user' in json.loads(x).keys() else '' for x in dataEvents['data']]
    # removing rows without a group or user, and rows where the user is 'guest'
dataEvents = dataEvents[((dataEvents['group'] != '') & (dataEvents['user'] != '') & (dataEvents['user'] != 'guest'))]
dataEvents['group_user_id'] = dataEvents['group'] + '~' + dataEvents['user']
# filtering to only take the group passed as argument
if(group != 'all'):
dataEvents = dataEvents[dataEvents['group'].isin(group)]
# Data Cleaning
dataEvents['time'] = | pd.to_datetime(dataEvents['time']) | pandas.to_datetime |
from operator import methodcaller
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as tm
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
from ibis.expr.scope import Scope
from ibis.expr.window import get_preceding_value, rows_with_max_lookback
from ibis.udf.vectorized import reduction
from ... import Backend, PandasClient, execute
from ...aggcontext import AggregationContext, window_agg_udf
from ...dispatch import pre_execute
from ...execution.window import get_aggcontext
pytestmark = pytest.mark.pandas
# These custom classes are used in test_custom_window_udf
class CustomInterval:
def __init__(self, value):
self.value = value
# These are necessary because ibis.expr.window
# will compare preceding and following
# with 0 to see if they are valid
def __lt__(self, other):
return self.value < other
def __gt__(self, other):
return self.value > other
class CustomWindow(ibis.expr.window.Window):
""" This is a dummy custom window that return n preceding rows
where n is defined by CustomInterval.value."""
def _replace(self, **kwds):
new_kwds = {
'group_by': kwds.get('group_by', self._group_by),
'order_by': kwds.get('order_by', self._order_by),
'preceding': kwds.get('preceding', self.preceding),
'following': kwds.get('following', self.following),
'max_lookback': kwds.get('max_lookback', self.max_lookback),
'how': kwds.get('how', self.how),
}
return CustomWindow(**new_kwds)
class CustomAggContext(AggregationContext):
def __init__(
self, parent, group_by, order_by, output_type, max_lookback, preceding
):
super().__init__(
parent=parent,
group_by=group_by,
order_by=order_by,
output_type=output_type,
max_lookback=max_lookback,
)
self.preceding = preceding
def agg(self, grouped_data, function, *args, **kwargs):
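        # Per-row window bounds: each row aggregates over itself plus at most
        # `preceding.value` prior rows; the rolling count caps the window size where
        # fewer rows are available, and the bounds are handed to window_agg_udf.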
upper_indices = pd.Series(range(1, len(self.parent) + 2))
window_sizes = (
grouped_data.rolling(self.preceding.value + 1)
.count()
.reset_index(drop=True)
)
lower_indices = upper_indices - window_sizes
mask = upper_indices.notna()
result_index = grouped_data.obj.index
result = window_agg_udf(
grouped_data,
function,
lower_indices,
upper_indices,
mask,
result_index,
self.dtype,
self.max_lookback,
*args,
**kwargs,
)
return result
@pytest.fixture(scope='session')
def sort_kind():
return 'mergesort'
default = pytest.mark.parametrize('default', [ibis.NA, ibis.literal('a')])
row_offset = pytest.mark.parametrize(
'row_offset', list(map(ibis.literal, [-1, 1, 0]))
)
range_offset = pytest.mark.parametrize(
'range_offset',
[
ibis.interval(days=1),
2 * ibis.interval(days=1),
-2 * ibis.interval(days=1),
],
)
@pytest.fixture
def row_window():
return ibis.window(following=0, order_by='plain_int64')
@pytest.fixture
def range_window():
return ibis.window(following=0, order_by='plain_datetimes_naive')
@pytest.fixture
def custom_window():
return CustomWindow(
preceding=CustomInterval(1),
following=0,
group_by='dup_ints',
order_by='plain_int64',
)
@default
@row_offset
def test_lead(t, df, row_offset, default, row_window):
expr = t.dup_strings.lead(row_offset, default=default).over(row_window)
result = expr.execute()
expected = df.dup_strings.shift(execute(-row_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@row_offset
def test_lag(t, df, row_offset, default, row_window):
expr = t.dup_strings.lag(row_offset, default=default).over(row_window)
result = expr.execute()
expected = df.dup_strings.shift(execute(row_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@range_offset
def test_lead_delta(t, df, range_offset, default, range_window):
expr = t.dup_strings.lead(range_offset, default=default).over(range_window)
result = expr.execute()
expected = (
df[['plain_datetimes_naive', 'dup_strings']]
.set_index('plain_datetimes_naive')
.squeeze()
.shift(freq=execute(-range_offset))
.reindex(df.plain_datetimes_naive)
.reset_index(drop=True)
)
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@range_offset
def test_lag_delta(t, df, range_offset, default, range_window):
expr = t.dup_strings.lag(range_offset, default=default).over(range_window)
result = expr.execute()
expected = (
df[['plain_datetimes_naive', 'dup_strings']]
.set_index('plain_datetimes_naive')
.squeeze()
.shift(freq=execute(range_offset))
.reindex(df.plain_datetimes_naive)
.reset_index(drop=True)
)
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
def test_first(t, df):
expr = t.dup_strings.first()
result = expr.execute()
assert result == df.dup_strings.iloc[0]
def test_last(t, df):
expr = t.dup_strings.last()
result = expr.execute()
assert result == df.dup_strings.iloc[-1]
def test_group_by_mutate_analytic(t, df):
gb = t.groupby(t.dup_strings)
expr = gb.mutate(
first_value=t.plain_int64.first(),
last_value=t.plain_strings.last(),
avg_broadcast=t.plain_float64 - t.plain_float64.mean(),
delta=(t.plain_int64 - t.plain_int64.lag())
/ (t.plain_float64 - t.plain_float64.lag()),
)
result = expr.execute()
gb = df.groupby('dup_strings')
expected = df.assign(
last_value=gb.plain_strings.transform('last'),
first_value=gb.plain_int64.transform('first'),
avg_broadcast=df.plain_float64 - gb.plain_float64.transform('mean'),
delta=(
(df.plain_int64 - gb.plain_int64.shift(1))
/ (df.plain_float64 - gb.plain_float64.shift(1))
),
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_players(players, players_df):
lagged = players.mutate(pct=lambda t: t.G - t.G.lag())
expected = players_df.assign(
pct=players_df.G - players_df.groupby('playerID').G.shift(1)
)
cols = expected.columns.tolist()
result = lagged.execute()[cols].sort_values(cols).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_batting_filter_mean(batting, batting_df):
expr = batting[batting.G > batting.G.mean()]
result = expr.execute()
expected = batting_df[batting_df.G > batting_df.G.mean()].reset_index(
drop=True
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_zscore(players, players_df):
expr = players.mutate(g_z=lambda t: (t.G - t.G.mean()) / t.G.std())
gb = players_df.groupby('playerID')
expected = players_df.assign(
g_z=(players_df.G - gb.G.transform('mean')) / gb.G.transform('std')
)
cols = expected.columns.tolist()
result = expr.execute()[cols].sort_values(cols).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_batting_avg_change_in_games_per_year(players, players_df):
expr = players.mutate(
delta=lambda t: (t.G - t.G.lag()) / (t.yearID - t.yearID.lag())
)
gb = players_df.groupby('playerID')
expected = players_df.assign(
delta=(players_df.G - gb.G.shift(1))
/ (players_df.yearID - gb.yearID.shift(1))
)
cols = expected.columns.tolist()
result = expr.execute()[cols].sort_values(cols).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=AssertionError, reason='NYI')
def test_batting_most_hits(players, players_df):
expr = players.mutate(
hits_rank=lambda t: t.H.rank().over(
ibis.cumulative_window(order_by=ibis.desc(t.H))
)
)
result = expr.execute()
hits_rank = players_df.groupby('playerID').H.rank(
method='min', ascending=False
)
expected = players_df.assign(hits_rank=hits_rank)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_quantile(players, players_df):
expr = players.mutate(hits_quantile=lambda t: t.H.quantile(0.25))
hits_quantile = players_df.groupby('playerID').H.transform(
'quantile', 0.25
)
expected = players_df.assign(hits_quantile=hits_quantile)
cols = expected.columns.tolist()
result = expr.execute()[cols].sort_values(cols).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('op', ['sum', 'mean', 'min', 'max'])
def test_batting_specific_cumulative(batting, batting_df, op, sort_kind):
ibis_method = methodcaller('cum{}'.format(op))
expr = ibis_method(batting.sort_by([batting.yearID]).G)
result = expr.execute().astype('float64')
pandas_method = methodcaller(op)
expected = pandas_method(
batting_df[['G', 'yearID']]
.sort_values('yearID', kind=sort_kind)
.G.expanding()
).reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_batting_cumulative(batting, batting_df, sort_kind):
expr = batting.mutate(
more_values=lambda t: t.G.sum().over(
ibis.cumulative_window(order_by=t.yearID)
)
)
result = expr.execute()
columns = ['G', 'yearID']
more_values = (
batting_df[columns]
.sort_values('yearID', kind=sort_kind)
.G.expanding()
.sum()
.astype('int64')
)
expected = batting_df.assign(more_values=more_values)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_cumulative_partitioned(batting, batting_df, sort_kind):
group_by = 'playerID'
order_by = 'yearID'
t = batting
expr = t.G.sum().over(
ibis.cumulative_window(order_by=order_by, group_by=group_by)
)
expr = t.mutate(cumulative=expr)
result = expr.execute()
columns = [group_by, order_by, 'G']
expected = (
batting_df[columns]
.set_index(order_by)
.groupby(group_by)
.G.expanding()
.sum()
.rename('cumulative')
)
tm.assert_series_equal(
result.set_index([group_by, order_by]).sort_index().cumulative,
expected.sort_index().astype("int64"),
)
def test_batting_rolling(batting, batting_df, sort_kind):
expr = batting.mutate(
more_values=lambda t: t.G.sum().over(
ibis.trailing_window(5, order_by=t.yearID)
)
)
result = expr.execute()
columns = ['G', 'yearID']
more_values = (
batting_df[columns]
.sort_values('yearID', kind=sort_kind)
.G.rolling(6, min_periods=1)
.sum()
.astype('int64')
)
expected = batting_df.assign(more_values=more_values)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_rolling_partitioned(batting, batting_df, sort_kind):
t = batting
group_by = 'playerID'
order_by = 'yearID'
expr = t.G.sum().over(
ibis.trailing_window(3, order_by=t[order_by], group_by=t[group_by])
)
expr = t.mutate(rolled=expr)
result = expr.execute()
columns = [group_by, order_by, 'G']
expected = (
batting_df[columns]
.set_index(order_by)
.groupby(group_by)
.G.rolling(4, min_periods=1)
.sum()
.rename('rolled')
)
tm.assert_series_equal(
result.set_index([group_by, order_by]).sort_index().rolled,
expected.sort_index().astype("int64"),
)
@pytest.mark.parametrize(
'window',
[
ibis.window(order_by='yearID'),
ibis.window(order_by='yearID', group_by='playerID'),
],
)
def test_window_failure_mode(batting, batting_df, window):
# can't have order by without a following value of 0
expr = batting.mutate(more_values=batting.G.sum().over(window))
with pytest.raises(ibis.common.exceptions.OperationNotDefinedError):
expr.execute()
def test_scalar_broadcasting(batting, batting_df):
expr = batting.mutate(demeaned=batting.G - batting.G.mean())
result = expr.execute()
expected = batting_df.assign(demeaned=batting_df.G - batting_df.G.mean())
tm.assert_frame_equal(result, expected)
def test_mutate_with_window_after_join(sort_kind):
left_df = pd.DataFrame(
{
'ints': [0, 1, 2],
'strings': ['a', 'b', 'c'],
'dates': pd.date_range('20170101', periods=3),
}
)
right_df = pd.DataFrame(
{
'group': [0, 1, 2] * 3,
'value': [0, 1, np.nan, 3, 4, np.nan, 6, 7, 8],
}
)
con = Backend().connect({'left': left_df, 'right': right_df})
left, right = map(con.table, ('left', 'right'))
joined = left.outer_join(right, left.ints == right.group)
proj = joined[left, right.value]
expr = proj.groupby('ints').mutate(sum=proj.value.sum())
result = expr.execute()
expected = pd.DataFrame(
{
'dates': pd.concat([left_df.dates] * 3)
.sort_values(kind=sort_kind)
.reset_index(drop=True),
'ints': [0] * 3 + [1] * 3 + [2] * 3,
'strings': ['a'] * 3 + ['b'] * 3 + ['c'] * 3,
'value': [0.0, 3.0, 6.0, 1.0, 4.0, 7.0, np.nan, np.nan, 8.0],
'sum': [9.0] * 3 + [12.0] * 3 + [8.0] * 3,
}
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_mutate_scalar_with_window_after_join():
left_df = pd.DataFrame({'ints': range(3)})
right_df = pd.DataFrame(
{
'group': [0, 1, 2] * 3,
'value': [0, 1, np.nan, 3, 4, np.nan, 6, 7, 8],
}
)
con = Backend().connect({'left': left_df, 'right': right_df})
left, right = map(con.table, ('left', 'right'))
joined = left.outer_join(right, left.ints == right.group)
proj = joined[left, right.value]
expr = proj.mutate(sum=proj.value.sum(), const=1)
result = expr.execute()
expected = pd.DataFrame(
{
'ints': [0] * 3 + [1] * 3 + [2] * 3,
'value': [0.0, 3.0, 6.0, 1.0, 4.0, 7.0, np.nan, np.nan, 8.0],
'sum': [29.0] * 9,
'const': [1] * 9,
}
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_project_scalar_after_join():
left_df = pd.DataFrame({'ints': range(3)})
right_df = pd.DataFrame(
{
'group': [0, 1, 2] * 3,
'value': [0, 1, np.nan, 3, 4, np.nan, 6, 7, 8],
}
)
con = Backend().connect({'left': left_df, 'right': right_df})
left, right = map(con.table, ('left', 'right'))
joined = left.outer_join(right, left.ints == right.group)
proj = joined[left, right.value]
expr = proj[proj.value.sum().name('sum'), ibis.literal(1).name('const')]
result = expr.execute()
expected = pd.DataFrame({'sum': [29.0] * 9, 'const': [1] * 9})
tm.assert_frame_equal(result[expected.columns], expected)
def test_project_list_scalar():
df = pd.DataFrame({'ints': range(3)})
con = Backend().connect({'df': df})
expr = con.table('df')
result = expr.mutate(res=expr.ints.quantile([0.5, 0.95])).execute()
tm.assert_series_equal(
result.res, pd.Series([[1.0, 1.9] for _ in range(0, 3)], name='res')
)
@pytest.mark.parametrize(
'index',
[
pytest.param(lambda time: None, id='no_index'),
pytest.param(lambda time: time, id='index'),
],
)
def test_window_with_preceding_expr(index):
time = pd.date_range('20180101', '20180110')
start = 2
data = np.arange(start, start + len(time))
df = pd.DataFrame({'value': data, 'time': time}, index=index(time))
client = Backend().connect({'df': df})
t = client.table('df')
expected = (
df.set_index('time')
.value.rolling('3d', closed='both')
.mean()
.reset_index(drop=True)
)
expected.index.name = None
day = ibis.interval(days=1)
window = ibis.trailing_window(3 * day, order_by=t.time)
expr = t.value.mean().over(window)
result = expr.execute()
tm.assert_series_equal(result, expected)
def test_window_with_mlb():
index = pd.date_range('20170501', '20170507')
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import os
import json
import numpy as np
import pandas as pd
from qa.utils import set_seed
DEFAULT_SIZES = [500, 1000, 1500, 2000, 2500, 3000]
def randomize_indices(data):
idx = np.arange(len(data))
return np.random.permutation(idx)
def partition_data(data, indices_or_sections=None, seed=42):
"""
Randomly shuffle `data` (a list or other indexable sequence) and split it into partitions.
`indices_or_sections` follows numpy.array_split semantics: an integer gives that many
roughly equal partitions, while a sorted list of indices gives the split points.
"""
set_seed(seed)
dd = np.array(data)
idx = randomize_indices(data)
idx_chunks = np.array_split(idx, indices_or_sections)
partitions = [list(dd[chunk]) for chunk in idx_chunks]
return partitions
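# Illustrative usage sketch (not part of the module); the data values below are
# hypothetical and only show how `indices_or_sections` is forwarded to
# np.array_split: an int requests that many roughly equal partitions, while a
# list of indices gives cumulative split points.
def _example_partition_data():
    data = list(range(10))
    three_parts = partition_data(data, indices_or_sections=3, seed=0)
    # -> three shuffled partitions with sizes 4, 3, 3
    by_index = partition_data(data, indices_or_sections=[2, 5], seed=0)
    # -> shuffled partitions with sizes 2, 3, 5 (splits before indices 2 and 5)
    return three_parts, by_index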
def create_increasing_sized_train_sets(json_data_file, sizes=DEFAULT_SIZES, **kwargs):
"""
json_data_file: filename of the original, full dataset from which to create subsets
sizes: list of cumulative dataset sizes to carve the original dataset into;
    each successive subset contains all of the previous subset's examples
    plus enough additional examples to reach the next size
Takes filename of a json dataset and the desired sizes and creates
subsets of increasing size. These subsets are saved to the directory
associated with the json_data_file.
"""
outfile_prefix = os.path.splitext(json_data_file)[0]
json_data = json.load(open(json_data_file, "r"))
data_chunks = partition_data(json_data["data"][0]["paragraphs"], sizes, **kwargs)
new_json = {"data": [{"paragraphs": []}]}
num_examples = 0
for chunk in data_chunks:
    num_examples += len(chunk)
new_json["data"][0]["paragraphs"] += chunk
json.dump(new_json, open(f"{outfile_prefix}_{num_examples}.json", "w"))
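# Illustrative usage sketch: the path below is hypothetical and must point at a
# SQuAD-style JSON file. With sizes=[500, 1000, 1500] this writes
# train_500.json, train_1000.json, train_1500.json, plus a final file named for
# the full paragraph count, each one a superset of the previous subset.
def _example_create_subsets():
    create_increasing_sized_train_sets("data/train.json", sizes=[500, 1000, 1500])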
def load_results(data_dir):
data = json.load(open(data_dir + "/results_.json", "r"))
return pd.DataFrame(data, index=[0])
import warnings
import itertools
from copy import copy
from functools import partial
from collections import UserString
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .palettes import (
QUAL_PALETTES,
color_palette,
)
from .utils import (
_check_argument,
get_color_cycle,
remove_na,
)
class SemanticMapping:
"""Base class for mapping data values to plot attributes."""
# -- Default attributes that all SemanticMapping subclasses must set
# Whether the mapping is numeric, categorical, or datetime
map_type = None
# Ordered list of unique values in the input data
levels = None
# A mapping from the data values to corresponding plot attributes
lookup_table = None
def __init__(self, plotter):
# TODO Putting this here so we can continue to use a lot of the
# logic that's built into the library, but the idea of this class
# is to move towards semantic mappings that are agnostic about the
# kind of plot they're going to be used to draw.
# Fully achieving that is going to take some thinking.
self.plotter = plotter
def map(cls, plotter, *args, **kwargs):
# This method is assigned the __init__ docstring
method_name = "_{}_map".format(cls.__name__[:-7].lower())
setattr(plotter, method_name, cls(plotter, *args, **kwargs))
return plotter
def _lookup_single(self, key):
"""Apply the mapping to a single data value."""
return self.lookup_table[key]
def __call__(self, key, *args, **kwargs):
"""Get the attribute(s) values for the data key."""
if isinstance(key, (list, np.ndarray, pd.Series)):
return [self._lookup_single(k, *args, **kwargs) for k in key]
else:
return self._lookup_single(key, *args, **kwargs)
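# Minimal sketch of the __call__ dispatch above, using a hypothetical subclass
# that bypasses the plotter requirement: a scalar key returns one attribute
# value, while a list/array/Series of keys returns a list of values.
def _example_semantic_lookup():
    class _DummyMapping(SemanticMapping):
        def __init__(self):
            # Skip the plotter machinery; only the lookup table matters here.
            self.lookup_table = {"a": 1, "b": 2}

    mapping = _DummyMapping()
    assert mapping("a") == 1
    assert mapping(["a", "b"]) == [1, 2]
    return mapping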
@share_init_params_with_map
class HueMapping(SemanticMapping):
"""Mapping that sets artist colors according to data values."""
# A specification of the colors that should appear in the plot
palette = None
# An object that normalizes data values to [0, 1] range for color mapping
norm = None
# A continuous colormap object for interpolating in a numeric context
cmap = None
def __init__(
self, plotter, palette=None, order=None, norm=None,
):
"""Map the levels of the `hue` variable to distinct colors.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data.get("hue", pd.Series(dtype=float))
if data.notna().any():
map_type = self.infer_map_type(
palette, norm, plotter.input_format, plotter.var_types["hue"]
)
# Our goal is to end up with a dictionary mapping every unique
# value in `data` to a color. We will also keep track of the
# metadata about this mapping we will need for, e.g., a legend
# --- Option 1: numeric mapping with a matplotlib colormap
if map_type == "numeric":
data = pd.to_numeric(data)
levels, lookup_table, norm, cmap = self.numeric_mapping(
data, palette, norm,
)
# --- Option 2: categorical mapping using seaborn palette
elif map_type == "categorical":
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
data, palette, order,
)
# --- Option 3: datetime mapping
else:
# TODO this needs actual implementation
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), palette, order,
)
self.map_type = map_type
self.lookup_table = lookup_table
self.palette = palette
self.levels = levels
self.norm = norm
self.cmap = cmap
def _lookup_single(self, key):
"""Get the color for a single value, using colormap to interpolate."""
try:
# Use a value that's in the original data vector
value = self.lookup_table[key]
except KeyError:
# Use the colormap to interpolate between existing datapoints
# (e.g. in the context of making a continuous legend)
try:
normed = self.norm(key)
except TypeError as err:
if np.isnan(key):
value = (0, 0, 0, 0)
else:
raise err
else:
if np.ma.is_masked(normed):
normed = np.nan
value = self.cmap(normed)
return value
def infer_map_type(self, palette, norm, input_format, var_type):
"""Determine how to implement the mapping."""
if palette in QUAL_PALETTES:
map_type = "categorical"
elif norm is not None:
map_type = "numeric"
elif isinstance(palette, (dict, list)):
map_type = "categorical"
elif input_format == "wide":
map_type = "categorical"
else:
map_type = var_type
return map_type
def categorical_mapping(self, data, palette, order):
"""Determine colors when the hue mapping is categorical."""
# -- Identify the order and name of the levels
levels = categorical_order(data, order)
n_colors = len(levels)
# -- Identify the set of colors to use
if isinstance(palette, dict):
missing = set(levels) - set(palette)
if any(missing):
err = "The palette dictionary is missing keys: {}"
raise ValueError(err.format(missing))
lookup_table = palette
else:
if palette is None:
if n_colors <= len(get_color_cycle()):
colors = color_palette(None, n_colors)
else:
colors = color_palette("husl", n_colors)
elif isinstance(palette, list):
if len(palette) != n_colors:
err = "The palette list has the wrong number of colors."
raise ValueError(err)
colors = palette
else:
colors = color_palette(palette, n_colors)
lookup_table = dict(zip(levels, colors))
return levels, lookup_table
def numeric_mapping(self, data, palette, norm):
"""Determine colors when the hue variable is quantitative."""
if isinstance(palette, dict):
# The presence of a norm object overrides a dictionary of hues
# in specifying a numeric mapping, so we need to process it here.
levels = list(sorted(palette))
colors = [palette[k] for k in sorted(palette)]
cmap = mpl.colors.ListedColormap(colors)
lookup_table = palette.copy()
else:
# The levels are the sorted unique values in the data
levels = list(np.sort(remove_na(data.unique())))
# --- Sort out the colormap to use from the palette argument
# Default numeric palette is our default cubehelix palette
# TODO do we want to do something complicated to ensure contrast?
palette = "ch:" if palette is None else palette
if isinstance(palette, mpl.colors.Colormap):
cmap = palette
else:
cmap = color_palette(palette, as_cmap=True)
# Now sort out the data normalization
if norm is None:
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = "``hue_norm`` must be None, tuple, or Normalize object."
raise ValueError(err)
if not norm.scaled():
norm(np.asarray(data.dropna()))
lookup_table = dict(zip(levels, cmap(norm(levels))))
return levels, lookup_table, norm, cmap
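# Minimal standalone sketch of the numeric lookup-table construction above:
# unique data levels are normalized to [0, 1] and passed through a colormap to
# obtain one RGBA color per level (the levels and palette name are arbitrary).
def _example_numeric_hue_lookup():
    levels = [1.0, 2.0, 4.0]
    norm = mpl.colors.Normalize()            # autoscales to the data on first call
    cmap = color_palette("viridis", as_cmap=True)
    lookup_table = dict(zip(levels, cmap(norm(levels))))
    return lookup_table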
@share_init_params_with_map
class SizeMapping(SemanticMapping):
"""Mapping that sets artist sizes according to data values."""
# An object that normalizes data values to [0, 1] range
norm = None
def __init__(
self, plotter, sizes=None, order=None, norm=None,
):
"""Map the levels of the `size` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data.get("size", pd.Series(dtype=float))
if data.notna().any():
map_type = self.infer_map_type(
norm, sizes, plotter.var_types["size"]
)
# --- Option 1: numeric mapping
if map_type == "numeric":
levels, lookup_table, norm, size_range = self.numeric_mapping(
data, sizes, norm,
)
# --- Option 2: categorical mapping
elif map_type == "categorical":
levels, lookup_table = self.categorical_mapping(
data, sizes, order,
)
size_range = None
# --- Option 3: datetime mapping
# TODO this needs an actual implementation
else:
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), sizes, order,
)
size_range = None
self.map_type = map_type
self.levels = levels
self.norm = norm
self.sizes = sizes
self.size_range = size_range
self.lookup_table = lookup_table
def infer_map_type(self, norm, sizes, var_type):
if norm is not None:
map_type = "numeric"
elif isinstance(sizes, (dict, list)):
map_type = "categorical"
else:
map_type = var_type
return map_type
def _lookup_single(self, key):
try:
value = self.lookup_table[key]
except KeyError:
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
value = self.size_range[0] + normed * np.ptp(self.size_range)
return value
def categorical_mapping(self, data, sizes, order):
levels = categorical_order(data, order)
if isinstance(sizes, dict):
# Dict inputs map existing data values to the size attribute
missing = set(levels) - set(sizes)
if any(missing):
err = f"Missing sizes for the following levels: {missing}"
raise ValueError(err)
lookup_table = sizes.copy()
elif isinstance(sizes, list):
# List inputs give size values in the same order as the levels
if len(sizes) != len(levels):
err = "The `sizes` list has the wrong number of values."
raise ValueError(err)
lookup_table = dict(zip(levels, sizes))
else:
if isinstance(sizes, tuple):
# Tuple input sets the min, max size values
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# Otherwise, we need to get the min, max size values from
# the plotter object we are attached to.
# TODO this is going to cause us trouble later, because we
# want to restructure things so that the plotter is generic
# across the visual representation of the data. But at this
# point, we don't know the visual representation. Likely we
# want to change the logic of this Mapping so that it gives
# points on a normalized range that then gets un-normalized
# when we know what we're drawing. But given the way the
# package works now, this way is cleanest.
sizes = self.plotter._default_size_range
# For categorical sizes, use regularly-spaced linear steps
# between the minimum and maximum sizes. Then reverse the
# ramp so that the largest value is used for the first entry
# in size_order, etc. This is because "ordered" categories
# are often thought to go in decreasing priority.
sizes = np.linspace(*sizes, len(levels))[::-1]
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table
def numeric_mapping(self, data, sizes, norm):
if isinstance(sizes, dict):
# The presence of a norm object overrides a dictionary of sizes
# in specifying a numeric mapping, so we need to process it
# dictionary here
levels = list(np.sort(list(sizes)))
size_values = sizes.values()
size_range = min(size_values), max(size_values)
else:
# The levels here will be the unique values in the data
levels = list(np.sort(remove_na(data.unique())))
if isinstance(sizes, tuple):
# For numeric inputs, the size can be parametrized by
# the minimum and maximum artist values to map to. The
# norm object that gets set up next specifies how to
# do the mapping.
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
size_range = sizes
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# When not provided, we get the size range from the plotter
# object we are attached to. See the note in the categorical
# method about how this is suboptimal for future development.
size_range = self.plotter._default_size_range
# Now that we know the minimum and maximum sizes that will get drawn,
# we need to map the data values that we have into that range. We will
# use a matplotlib Normalize class, which is typically used for numeric
# color mapping but works fine here too. It takes data values and maps
# them into a [0, 1] interval, potentially nonlinear-ly.
if norm is None:
# Default is a linear function between the min and max data values
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
# It is also possible to give different limits in data space
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = f"Value for size `norm` parameter not understood: {norm}"
raise ValueError(err)
else:
# If provided with Normalize object, copy it so we can modify
norm = copy(norm)
# Set the mapping so all output values are in [0, 1]
norm.clip = True
# If the input range is not set, use the full range of the data
if not norm.scaled():
norm(levels)
# Map from data values to [0, 1] range
sizes_scaled = norm(levels)
# Now map from the scaled range into the artist units
if isinstance(sizes, dict):
lookup_table = sizes
else:
lo, hi = size_range
sizes = lo + sizes_scaled * (hi - lo)
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table, norm, size_range
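# Minimal standalone sketch of the size interpolation above: data levels are
# normalized to [0, 1] and then linearly rescaled into a (hypothetical) artist
# size range, which is the same calculation _lookup_single uses for unseen keys.
def _example_numeric_size_lookup():
    levels = np.array([10.0, 20.0, 40.0])
    lo, hi = 2.0, 8.0                        # assumed artist size range
    norm = mpl.colors.Normalize()
    norm.clip = True
    sizes_scaled = norm(levels)              # -> [0.0, 1/3, 1.0]
    sizes = lo + sizes_scaled * (hi - lo)    # -> [2.0, 4.0, 8.0]
    return dict(zip(levels, sizes))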
@share_init_params_with_map
class StyleMapping(SemanticMapping):
"""Mapping that sets artist style according to data values."""
# Style mapping is always treated as categorical
map_type = "categorical"
def __init__(
self, plotter, markers=None, dashes=None, order=None,
):
"""Map the levels of the `style` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data.get("style", pd.Series(dtype=float))
if data.notna().any():
# Cast to list to handle numpy/pandas datetime quirks
if variable_type(data) == "datetime":
data = list(data)
# Find ordered unique values
levels = categorical_order(data, order)
markers = self._map_attributes(
markers, levels, unique_markers(len(levels)), "markers",
)
dashes = self._map_attributes(
dashes, levels, unique_dashes(len(levels)), "dashes",
)
# Build the paths matplotlib will use to draw the markers
paths = {}
filled_markers = []
for k, m in markers.items():
if not isinstance(m, mpl.markers.MarkerStyle):
m = mpl.markers.MarkerStyle(m)
paths[k] = m.get_path().transformed(m.get_transform())
filled_markers.append(m.is_filled())
# Mixture of filled and unfilled markers will show line art markers
# in the edge color, which defaults to white. This can be handled,
# but there would be additional complexity with specifying the
# weight of the line art markers without overwhelming the filled
# ones with the edges. So for now, we will disallow mixtures.
if any(filled_markers) and not all(filled_markers):
err = "Filled and line art markers cannot be mixed"
raise ValueError(err)
lookup_table = {}
for key in levels:
lookup_table[key] = {}
if markers:
lookup_table[key]["marker"] = markers[key]
lookup_table[key]["path"] = paths[key]
if dashes:
lookup_table[key]["dashes"] = dashes[key]
self.levels = levels
self.lookup_table = lookup_table
def _lookup_single(self, key, attr=None):
"""Get attribute(s) for a given data point."""
if attr is None:
value = self.lookup_table[key]
else:
value = self.lookup_table[key][attr]
return value
def _map_attributes(self, arg, levels, defaults, attr):
"""Handle the specification for a given style attribute."""
if arg is True:
lookup_table = dict(zip(levels, defaults))
elif isinstance(arg, dict):
missing = set(levels) - set(arg)
if missing:
err = f"These `{attr}` levels are missing values: {missing}"
raise ValueError(err)
lookup_table = arg
elif isinstance(arg, Sequence):
if len(levels) != len(arg):
err = f"The `{attr}` argument has the wrong number of values"
raise ValueError(err)
lookup_table = dict(zip(levels, arg))
elif arg:
err = f"This `{attr}` argument was not understood: {arg}"
raise ValueError(err)
else:
lookup_table = {}
return lookup_table
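# Minimal standalone sketch of how marker specifications become matplotlib Path
# objects, mirroring the loop in StyleMapping.__init__ above (marker codes are
# ordinary matplotlib marker strings).
def _example_marker_paths():
    markers = {"a": "o", "b": "s"}
    paths = {}
    for key, marker in markers.items():
        style = mpl.markers.MarkerStyle(marker)
        paths[key] = style.get_path().transformed(style.get_transform())
    return paths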
# =========================================================================== #
class VectorPlotter:
"""Base class for objects underlying *plot functions."""
_semantic_mappings = {
"hue": HueMapping,
"size": SizeMapping,
"style": StyleMapping,
}
# TODO units is another example of a non-mapping "semantic"
# we need a general name for this and separate handling
semantics = "x", "y", "hue", "size", "style", "units"
wide_structure = {
"x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
}
flat_structure = {"x": "@index", "y": "@values"}
_default_size_range = 1, 2 # Unused but needed in tests, ugh
def __init__(self, data=None, variables={}):
self._var_levels = {}
# var_ordered is relevant only for categorical axis variables, and may
# be better handled by an internal axis information object that tracks
# such information and is set up by the scale_* methods. The analogous
# information for numeric axes would be information about log scales.
self._var_ordered = {"x": False, "y": False} # alt., used DefaultDict
self.assign_variables(data, variables)
for var, cls in self._semantic_mappings.items():
# Create the mapping function
map_func = partial(cls.map, plotter=self)
setattr(self, f"map_{var}", map_func)
# Call the mapping function to initialize with default values
getattr(self, f"map_{var}")()
@classmethod
def get_semantics(cls, kwargs, semantics=None):
"""Subset a dictionary` arguments with known semantic variables."""
# TODO this should be get_variables since we have included x and y
if semantics is None:
semantics = cls.semantics
variables = {}
for key, val in kwargs.items():
if key in semantics and val is not None:
variables[key] = val
return variables
@property
def has_xy_data(self):
"""Return True at least one of x or y is defined."""
return bool({"x", "y"} & set(self.variables))
@property
def var_levels(self):
"""Property interface to ordered list of variables levels.
Each time it's accessed, it updates the var_levels dictionary with the
list of levels in the current semantic mappers. But it also allows the
dictionary to persist, so it can be used to set levels by a key. This is
used to track the list of col/row levels using an attached FacetGrid
object, but it's kind of messy and ideally fixed by improving the
faceting logic so it interfaces better with the modern approach to
tracking plot variables.
"""
for var in self.variables:
try:
map_obj = getattr(self, f"_{var}_map")
self._var_levels[var] = map_obj.levels
except AttributeError:
pass
return self._var_levels
def assign_variables(self, data=None, variables={}):
"""Define plot variables, optionally using lookup from `data`."""
x = variables.get("x", None)
y = variables.get("y", None)
if x is None and y is None:
self.input_format = "wide"
plot_data, variables = self._assign_variables_wideform(
data, **variables,
)
else:
self.input_format = "long"
plot_data, variables = self._assign_variables_longform(
data, **variables,
)
self.plot_data = plot_data
self.variables = variables
self.var_types = {
v: variable_type(
plot_data[v],
boolean_type="numeric" if v in "xy" else "categorical"
)
for v in variables
}
return self
def _assign_variables_wideform(self, data=None, **kwargs):
"""Define plot variables given wide-form data.
Parameters
----------
data : flat vector or collection of vectors
Data can be a vector or mapping that is coerceable to a Series
or a sequence- or mapping-based collection of such vectors, or a
rectangular numpy array, or a Pandas DataFrame.
kwargs : variable -> data mappings
Behavior with keyword arguments is currently undefined.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
"""
# Raise if semantic or other variables are assigned in wide-form mode
assigned = [k for k, v in kwargs.items() if v is not None]
if any(assigned):
s = "s" if len(assigned) > 1 else ""
err = f"The following variable{s} cannot be assigned with wide-form data: "
err += ", ".join(f"`{v}`" for v in assigned)
raise ValueError(err)
# Determine if the data object actually has any data in it
empty = data is None or not len(data)
# Then, determine if we have "flat" data (a single vector)
if isinstance(data, dict):
values = data.values()
else:
values = np.atleast_1d(np.asarray(data, dtype=object))
flat = not any(
isinstance(v, Iterable) and not isinstance(v, (str, bytes))
for v in values
)
if empty:
# Make an object with the structure of plot_data, but empty
plot_data = pd.DataFrame()
variables = {}
elif flat:
# Handle flat data by converting to pandas Series and using the
# index and/or values to define x and/or y
# (Could be accomplished with a more general to_series() interface)
flat_data = pd.Series(data).copy()
names = {
"@values": flat_data.name,
"@index": flat_data.index.name
}
plot_data = {}
variables = {}
for var in ["x", "y"]:
if var in self.flat_structure:
attr = self.flat_structure[var]
plot_data[var] = getattr(flat_data, attr[1:])
variables[var] = names[self.flat_structure[var]]
plot_data = pd.DataFrame(plot_data)
else:
# Otherwise assume we have some collection of vectors.
# Handle Python sequences such that entries end up in the columns,
# not in the rows, of the intermediate wide DataFrame.
# One way to accomplish this is to convert to a dict of Series.
if isinstance(data, Sequence):
data_dict = {}
for i, var in enumerate(data):
key = getattr(var, "name", i)
# TODO is there a safer/more generic way to ensure Series?
# sort of like np.asarray, but for pandas?
data_dict[key] = pd.Series(var)
data = data_dict
# Pandas requires that dict values either be Series objects
# or all have the same length, but we want to allow "ragged" inputs
if isinstance(data, Mapping):
data = {key: pd.Series(val) for key, val in data.items()}
# Otherwise, delegate to the pandas DataFrame constructor
# This is where we'd prefer to use a general interface that says
# "give me this data as a pandas DataFrame", so we can accept
# DataFrame objects from other libraries
wide_data = pd.DataFrame(data, copy=True)
# At this point we should reduce the dataframe to numeric cols
numeric_cols = [
k for k, v in wide_data.items() if variable_type(v) == "numeric"
]
wide_data = wide_data[numeric_cols]
# Now melt the data to long form
melt_kws = {"var_name": "@columns", "value_name": "@values"}
use_index = "@index" in self.wide_structure.values()
if use_index:
melt_kws["id_vars"] = "@index"
try:
orig_categories = wide_data.columns.categories
orig_ordered = wide_data.columns.ordered
wide_data.columns = wide_data.columns.add_categories("@index")
except AttributeError:
category_columns = False
else:
category_columns = True
wide_data["@index"] = wide_data.index.to_series()
plot_data = wide_data.melt(**melt_kws)
if use_index and category_columns:
plot_data["@columns"] = pd.Categorical(plot_data["@columns"],
orig_categories,
orig_ordered)
# Assign names corresponding to plot semantics
for var, attr in self.wide_structure.items():
plot_data[var] = plot_data[attr]
# Define the variable names
variables = {}
for var, attr in self.wide_structure.items():
obj = getattr(wide_data, attr[1:])
variables[var] = getattr(obj, "name", None)
# Remove redundant columns from plot_data
plot_data = plot_data[list(variables)]
return plot_data, variables
def _assign_variables_longform(self, data=None, **kwargs):
"""Define plot variables given long-form data and/or vector inputs.
Parameters
----------
data : dict-like collection of vectors
Input data where variable names map to vector values.
kwargs : variable -> data mappings
Keys are seaborn variables (x, y, hue, ...) and values are vectors
in any format that can construct a :class:`pandas.DataFrame` or
names of columns or index levels in ``data``.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
Raises
------
ValueError
When variables are strings that don't appear in ``data``.
"""
plot_data = {}
variables = {}
# Data is optional; all variables can be defined as vectors
if data is None:
data = {}
# TODO should we try a data.to_dict() or similar here to more
# generally accept objects with that interface?
# Note that dict(df) also works for pandas, and gives us what we
# want, whereas DataFrame.to_dict() gives a nested dict instead of
# a dict of series.
# Variables can also be extracted from the index attribute
# TODO is this the most general way to enable it?
# There is no index.to_dict on multiindex, unfortunately
try:
index = data.index.to_frame()
except AttributeError:
index = {}
# The caller will determine the order of variables in plot_data
for key, val in kwargs.items():
# First try to treat the argument as a key for the data collection.
# But be flexible about what can be used as a key.
# Usually it will be a string, but allow numbers or tuples too when
# taking from the main data object. Only allow strings to reference
# fields in the index, because otherwise there is too much ambiguity.
try:
val_as_data_key = (
val in data
or (isinstance(val, (str, bytes)) and val in index)
)
except (KeyError, TypeError):
val_as_data_key = False
if val_as_data_key:
# We know that __getitem__ will work
if val in data:
plot_data[key] = data[val]
elif val in index:
plot_data[key] = index[val]
variables[key] = val
elif isinstance(val, (str, bytes)):
# This looks like a column name but we don't know what it means!
err = f"Could not interpret value `{val}` for parameter `{key}`"
raise ValueError(err)
else:
# Otherwise, assume the value is itself data
# Raise when a data object is present and a vector can't be matched to it
if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):
if np.ndim(val) and len(data) != len(val):
val_cls = val.__class__.__name__
err = (
f"Length of {val_cls} vectors must match length of `data`"
f" when both are used, but `data` has length {len(data)}"
f" and the vector passed to `{key}` has length {len(val)}."
)
raise ValueError(err)
plot_data[key] = val
# Try to infer the name of the variable
variables[key] = getattr(val, "name", None)
# Construct a tidy plot DataFrame. This will convert a number of
# types automatically, aligning on index in case of pandas objects
plot_data = pd.DataFrame(plot_data)
# Reduce the variables dictionary to fields with valid data
variables = {
var: name
for var, name in variables.items()
if plot_data[var].notnull().any()
}
return plot_data, variables
def iter_data(
self, grouping_vars=None, *,
reverse=False, from_comp_data=False,
by_facet=True, allow_empty=False, dropna=True,
):
"""Generator for getting subsets of data defined by semantic variables.
Also injects "col" and "row" into grouping semantics.
Parameters
----------
grouping_vars : string or list of strings
Semantic variables that define the subsets of data.
reverse : bool
If True, reverse the order of iteration.
from_comp_data : bool
If True, use self.comp_data rather than self.plot_data
by_facet : bool
If True, add faceting variables to the set of grouping variables.
allow_empty : bool
If True, yield an empty dataframe when no observations exist for
combinations of grouping variables.
dropna : bool
If True, remove rows with missing data.
Yields
------
sub_vars : dict
Keys are semantic names, values are the level of that semantic.
sub_data : :class:`pandas.DataFrame`
Subset of ``plot_data`` for this combination of semantic values.
"""
# TODO should this default to using all (non x/y?) semantics?
# or define grouping vars somewhere?
if grouping_vars is None:
grouping_vars = []
elif isinstance(grouping_vars, str):
grouping_vars = [grouping_vars]
elif isinstance(grouping_vars, tuple):
grouping_vars = list(grouping_vars)
# Always insert faceting variables
if by_facet:
facet_vars = {"col", "row"}
grouping_vars.extend(
facet_vars & set(self.variables) - set(grouping_vars)
)
# Reduce to the semantics used in this plot
grouping_vars = [
var for var in grouping_vars if var in self.variables
]
if from_comp_data:
data = self.comp_data
else:
data = self.plot_data
if dropna:
data = data.dropna()
levels = self.var_levels.copy()
if from_comp_data:
for axis in {"x", "y"} & set(grouping_vars):
if self.var_types[axis] == "categorical":
if self._var_ordered[axis]:
# If the axis is ordered, then the axes in a possible
# facet grid are by definition "shared", or there is a
# single axis with a unique cat -> idx mapping.
# So we can just take the first converter object.
converter = self.converters[axis].iloc[0]
levels[axis] = converter.convert_units(levels[axis])
else:
# Otherwise, the mappings may not be unique, but we can
# use the unique set of index values in comp_data.
levels[axis] = np.sort(data[axis].unique())
elif self.var_types[axis] == "datetime":
levels[axis] = mpl.dates.date2num(levels[axis])
elif self.var_types[axis] == "numeric" and self._log_scaled(axis):
levels[axis] = np.log10(levels[axis])
if grouping_vars:
grouped_data = data.groupby(
grouping_vars, sort=False, as_index=False
)
grouping_keys = []
for var in grouping_vars:
grouping_keys.append(levels.get(var, []))
iter_keys = itertools.product(*grouping_keys)
if reverse:
iter_keys = reversed(list(iter_keys))
for key in iter_keys:
# Pandas fails with singleton tuple inputs
pd_key = key[0] if len(key) == 1 else key
try:
data_subset = grouped_data.get_group(pd_key)
except KeyError:
# XXX we are adding this to allow backwards compatibility
# with the empty artists that old categorical plots would
# add (before 0.12), which we may decide to break, in which
# case this option could be removed
data_subset = data.loc[[]]
if data_subset.empty and not allow_empty:
continue
sub_vars = dict(zip(grouping_vars, key))
yield sub_vars, data_subset.copy()
else:
yield {}, data.copy()
@property
def comp_data(self):
"""Dataframe with numeric x and y, after unit conversion and log scaling."""
if not hasattr(self, "ax"):
# Probably a good idea, but will need a bunch of tests updated
# Most of these tests should just use the external interface
# Then this can be re-enabled.
# raise AttributeError("No Axes attached to plotter")
return self.plot_data
if not hasattr(self, "_comp_data"):
comp_data = (
self.plot_data
.copy(deep=False)
.drop(["x", "y"], axis=1, errors="ignore")
)
for var in "yx":
if var not in self.variables:
continue
comp_col = pd.Series(index=self.plot_data.index, dtype=float, name=var)
grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
for converter, orig in grouped:
with pd.option_context('mode.use_inf_as_null', True):
orig = orig.dropna()
if var in self.var_levels:
# TODO this should happen in some centralized location
# it is similar to GH2419, but more complicated because
# supporting `order` in categorical plots is tricky
orig = orig[orig.isin(self.var_levels[var])]
comp = pd.to_numeric(converter.convert_units(orig))
if converter.get_scale() == "log":
comp = np.log10(comp)
comp_col.loc[orig.index] = comp
comp_data.insert(0, var, comp_col)
self._comp_data = comp_data
return self._comp_data
def _get_axes(self, sub_vars):
"""Return an Axes object based on existence of row/col variables."""
row = sub_vars.get("row", None)
col = sub_vars.get("col", None)
if row is not None and col is not None:
return self.facets.axes_dict[(row, col)]
elif row is not None:
return self.facets.axes_dict[row]
elif col is not None:
return self.facets.axes_dict[col]
elif self.ax is None:
return self.facets.ax
else:
return self.ax
def _attach(
self,
obj,
allowed_types=None,
log_scale=None,
):
"""Associate the plotter with an Axes manager and initialize its units.
Parameters
----------
obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`
Structural object that we will eventually plot onto.
allowed_types : str or list of str
If provided, raise when either the x or y variable does not have
one of the declared seaborn types.
log_scale : bool, number, or pair of bools or numbers
If not False, set the axes to use log scaling, with the given
base or defaulting to 10. If a tuple, interpreted as separate
arguments for the x and y axes.
"""
from .axisgrid import FacetGrid
if isinstance(obj, FacetGrid):
self.ax = None
self.facets = obj
ax_list = obj.axes.flatten()
if obj.col_names is not None:
self.var_levels["col"] = obj.col_names
if obj.row_names is not None:
self.var_levels["row"] = obj.row_names
else:
self.ax = obj
self.facets = None
ax_list = [obj]
# Identify which "axis" variables we have defined
axis_variables = set("xy").intersection(self.variables)
# -- Verify the types of our x and y variables here.
# This doesn't really make complete sense being here, but it's a fine
# place for it, given the current system.
# (Note that for some plots, there might be more complicated restrictions)
# e.g. the categorical plots have their own check that is specific to the
# non-categorical axis.
if allowed_types is None:
allowed_types = ["numeric", "datetime", "categorical"]
elif isinstance(allowed_types, str):
allowed_types = [allowed_types]
for var in axis_variables:
var_type = self.var_types[var]
if var_type not in allowed_types:
err = (
f"The {var} variable is {var_type}, but one of "
f"{allowed_types} is required"
)
raise TypeError(err)
# -- Get axis objects for each row in plot_data for type conversions and scaling
facet_dim = {"x": "col", "y": "row"}
self.converters = {}
for var in axis_variables:
other_var = {"x": "y", "y": "x"}[var]
converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)
share_state = getattr(self.facets, f"_share{var}", True)
# Simplest cases are that we have a single axes, all axes are shared,
# or sharing is only on the orthogonal facet dimension. In these cases,
# all datapoints get converted the same way, so use the first axis
if share_state is True or share_state == facet_dim[other_var]:
converter.loc[:] = getattr(ax_list[0], f"{var}axis")
else:
# Next simplest case is when no axes are shared, and we can
# use the axis objects within each facet
if share_state is False:
for axes_vars, axes_data in self.iter_data():
ax = self._get_axes(axes_vars)
converter.loc[axes_data.index] = getattr(ax, f"{var}axis")
# In the more complicated case, the axes are shared within each
# "file" of the facetgrid. In that case, we need to subset the data
# for that file and assign it the first axis in the slice of the grid
else:
names = getattr(self.facets, f"{share_state}_names")
for i, level in enumerate(names):
idx = (i, 0) if share_state == "row" else (0, i)
axis = getattr(self.facets.axes[idx], f"{var}axis")
converter.loc[self.plot_data[share_state] == level] = axis
# Store the converter vector, which we use elsewhere (e.g comp_data)
self.converters[var] = converter
# Now actually update the matplotlib objects to do the conversion we want
grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
for converter, seed_data in grouped:
if self.var_types[var] == "categorical":
if self._var_ordered[var]:
order = self.var_levels[var]
else:
order = None
seed_data = categorical_order(seed_data, order)
converter.update_units(seed_data)
# -- Set numerical axis scales
# First unpack the log_scale argument
if log_scale is None:
scalex = scaley = False
else:
# Allow single value or x, y tuple
try:
scalex, scaley = log_scale
except TypeError:
scalex = log_scale if "x" in self.variables else False
scaley = log_scale if "y" in self.variables else False
# Now use it
for axis, scale in zip("xy", (scalex, scaley)):
if scale:
for ax in ax_list:
set_scale = getattr(ax, f"set_{axis}scale")
if scale is True:
set_scale("log")
else:
if LooseVersion(mpl.__version__) >= "3.3":
set_scale("log", base=scale)
else:
set_scale("log", **{f"base{axis}": scale})
# For categorical y, we want the "first" level to be at the top of the axis
if self.var_types.get("y", None) == "categorical":
for ax in ax_list:
try:
ax.yaxis.set_inverted(True)
except AttributeError: # mpl < 3.1
if not ax.yaxis_inverted():
ax.invert_yaxis()
# TODO -- Add axes labels
def _log_scaled(self, axis):
"""Return True if specified axis is log scaled on all attached axes."""
if not hasattr(self, "ax"):
return False
if self.ax is None:
axes_list = self.facets.axes.flatten()
else:
axes_list = [self.ax]
log_scaled = []
for ax in axes_list:
data_axis = getattr(ax, f"{axis}axis")
log_scaled.append(data_axis.get_scale() == "log")
if any(log_scaled) and not all(log_scaled):
raise RuntimeError("Axis scaling is not consistent")
return any(log_scaled)
def _add_axis_labels(self, ax, default_x="", default_y=""):
"""Add axis labels if not present, set visibility to match ticklabels."""
# TODO ax could default to None and use attached axes if present
# but what to do about the case of facets? Currently using FacetGrid's
# set_axis_labels method, which doesn't add labels to the interior even
# when the axes are not shared. Maybe that makes sense?
if not ax.get_xlabel():
x_visible = any(t.get_visible() for t in ax.get_xticklabels())
ax.set_xlabel(self.variables.get("x", default_x), visible=x_visible)
if not ax.get_ylabel():
y_visible = any(t.get_visible() for t in ax.get_yticklabels())
ax.set_ylabel(self.variables.get("y", default_y), visible=y_visible)
# XXX If the scale_* methods are going to modify the plot_data structure, they
# can't be called twice. That means that if they are called twice, they should
# raise. Alternatively, we could store an original version of plot_data and each
# time they are called they operate on the store, not the current state.
def scale_native(self, axis, *args, **kwargs):
# Default, defer to matplotlib
raise NotImplementedError
def scale_numeric(self, axis, *args, **kwargs):
# Feels needed to completeness, what should it do?
# Perhaps handle log scaling? Set the ticker/formatter/limits?
raise NotImplementedError
def scale_datetime(self, axis, *args, **kwargs):
# Use pd.to_datetime to convert strings or numbers to datetime objects
# Note, use day-resolution for numeric->datetime to match matplotlib
raise NotImplementedError
def scale_categorical(self, axis, order=None, formatter=None):
"""
Enforce categorical (fixed-scale) rules for the data on given axis.
Parameters
----------
axis : "x" or "y"
Axis of the plot to operate on.
order : list
Order that unique values should appear in.
formatter : callable
Function mapping values to a string representation.
Returns
-------
self
"""
# This method both modifies the internal representation of the data
# (converting it to string) and sets some attributes on self. It might be
# a good idea to have a separate object attached to self that contains the
# information in those attributes (i.e. whether to enforce variable order
# across facets, the order to use) similar to the SemanticMapping objects
# we have for semantic variables. That object could also hold the converter
# objects that get used, if we can decouple those from an existing axis
# (cf. https://github.com/matplotlib/matplotlib/issues/19229).
# There are some interactions with faceting information that would need
# to be thought through, since the converters to use depend on the facets.
# If we go that route, these methods could become "borrowed" methods similar
# to what happens with the alternate semantic mapper constructors, although
# that approach is kind of fussy and confusing.
# TODO this method could also set the grid state? Since we like to have no
# grid on the categorical axis by default. Again, a case where we'll need to
# store information until we use it, so best to have a way to collect the
# attributes that this method sets.
# TODO if we are going to set visual properties of the axes with these methods,
# then we could do the steps currently in CategoricalPlotter._adjust_cat_axis
# TODO another, and distinct idea, is to expose a cut= param here
_check_argument("axis", ["x", "y"], axis)
# Categorical plots can be "univariate" in which case they get an anonymous
# category label on the opposite axis.
if axis not in self.variables:
self.variables[axis] = None
self.var_types[axis] = "categorical"
self.plot_data[axis] = ""
# If the "categorical" variable has a numeric type, sort the rows so that
# the default result from categorical_order has those values sorted after
# they have been coerced to strings. The reason for this is so that later
# we can get facet-wise orders that are correct.
# XXX Should this also sort datetimes?
# It feels more consistent, but technically will be a default change
# If so, should also change categorical_order to behave that way
if self.var_types[axis] == "numeric":
self.plot_data = self.plot_data.sort_values(axis, kind="mergesort")
# Now get a reference to the categorical data vector
cat_data = self.plot_data[axis]
# Get the initial categorical order, which we do before string
# conversion to respect the original types of the order list.
# Track whether the order is given explicitly so that we can know
# whether or not to use the order constructed here downstream
self._var_ordered[axis] = order is not None or cat_data.dtype.name == "category"
order = pd.Index(categorical_order(cat_data, order))
# Then convert data to strings. This is because in matplotlib,
# "categorical" data really mean "string" data, so doing this artists
# will be drawn on the categorical axis with a fixed scale.
# TODO implement formatter here; check that it returns strings?
if formatter is not None:
cat_data = cat_data.map(formatter)
order = order.map(formatter)
else:
cat_data = cat_data.astype(str)
order = order.astype(str)
# Update the levels list with the type-converted order variable
self.var_levels[axis] = order
# Now ensure that seaborn will use categorical rules internally
self.var_types[axis] = "categorical"
# Put the string-typed categorical vector back into the plot_data structure
self.plot_data[axis] = cat_data
return self
class VariableType(UserString):
"""
Prevent comparisons elsewhere in the library from using the wrong name.
Errors are simple assertions because users should not be able to trigger
them. If that changes, they should be more verbose.
"""
allowed = "numeric", "datetime", "categorical"
def __init__(self, data):
assert data in self.allowed, data
super().__init__(data)
def __eq__(self, other):
assert other in self.allowed, other
return self.data == other
def variable_type(vector, boolean_type="numeric"):
"""
Determine whether a vector contains numeric, categorical, or datetime data.
This function differs from the pandas typing API in two ways:
- Python sequences or object-typed PyData objects are considered numeric if
all of their entries are numeric.
- String or mixed-type data are considered categorical even if not
explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
Parameters
----------
vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
Input data to test.
boolean_type : 'numeric' or 'categorical'
Type to use for vectors containing only 0s and 1s (and NAs).
Returns
-------
var_type : 'numeric', 'categorical', or 'datetime'
Name identifying the type of data in the vector.
"""
# If a categorical dtype is set, infer categorical
if pd.api.types.is_categorical_dtype(vector):
return VariableType("categorical")
# Special-case all-na data, which is always "numeric"
if pd.isna(vector).all():
return VariableType("numeric")
# Special-case binary/boolean data, allow caller to determine
# This triggers a numpy warning when vector has strings/objects
# https://github.com/numpy/numpy/issues/6784
# Because we reduce with .all(), we are agnostic about whether the
# comparison returns a scalar or vector, so we will ignore the warning.
# It triggers a separate DeprecationWarning when the vector has datetimes:
# https://github.com/numpy/numpy/issues/13548
# This is considered a bug by numpy and will likely go away.
with warnings.catch_warnings():
warnings.simplefilter(
action='ignore', category=(FutureWarning, DeprecationWarning)
)
if np.isin(vector, [0, 1, np.nan]).all():
return VariableType(boolean_type)
# Defer to positive pandas tests
if pd.api.types.is_numeric_dtype(vector):
return VariableType("numeric")
if pd.api.types.is_datetime64_dtype(vector):
    return VariableType("datetime")
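# Illustrative checks for the branches shown above; the full implementation
# additionally falls back to "categorical" for strings and mixed-type input.
def _example_variable_type():
    assert variable_type(pd.Series([1.0, 2.5])) == "numeric"
    assert variable_type(pd.Series([0, 1, 0]), boolean_type="categorical") == "categorical"
    assert variable_type(pd.date_range("2020-01-01", periods=3).to_series()) == "datetime"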
import os
import unittest
import pandas as pd
from context import technical as ti
# Change working directory
# This enable running tests from repository root
if os.getcwd() != os.path.abspath(os.path.dirname(__file__)):
os.chdir('tests/')
# Test results
class ResultsRSI(unittest.TestCase):
# Input data
test_data = pd.read_csv('test_data/correct_series.csv')
test_data_df = pd.read_csv('test_data/correct_ohlc.csv')
"""
Functions to make all of the figures for Solar Forecast Arbiter reports using
Bokeh.
This code is currently unreachable from the rest of the Solar Forecast Arbiter
Core library. It may be used in place of the plotly_figures to generate bokeh
plots for the `plots` attribute of the RawReport object. See
:py:mod:`solarforecastarbiter.reports.main` for an example of report
generation.
"""
import calendar
from contextlib import contextmanager
import datetime as dt
from itertools import cycle
import logging
import warnings
from bokeh.embed import components
from bokeh.io.export import get_svgs
from bokeh.layouts import gridplot
from bokeh.models import (ColumnDataSource, HoverTool, Legend,
DatetimeTickFormatter, CategoricalTickFormatter,
CDSView, GroupFilter, BooleanFilter)
from bokeh.models.ranges import Range1d, FactorRange, DataRange1d
from bokeh.plotting import figure
from bokeh.transform import factor_cmap, dodge
from bokeh import palettes
from bokeh import __version__ as bokeh_version
import pandas as pd
import numpy as np
from solarforecastarbiter import datamodel
from solarforecastarbiter.plotting.utils import line_or_step
logger = logging.getLogger(__name__)
PALETTE = (
palettes.d3['Category20'][20][::2] + palettes.d3['Category20'][20][1::2])
_num_obs_colors = 3
# drop white
OBS_PALETTE = list(palettes.grey(_num_obs_colors + 1)[0:_num_obs_colors])
OBS_PALETTE.reverse()
OBS_PALETTE_TD_RANGE = pd.timedelta_range(
freq='10min', end='60min', periods=_num_obs_colors)
def construct_timeseries_cds(report):
"""Construct two standardized Bokeh CDS for the timeseries and scatter
plot functions. One with timeseries data for all observations,
aggregates, and forecasts in the report, and the other with
associated metadata sharing a common `pair_index` key.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
Returns
-------
value_cds : bokeh.models.ColumnDataSource
Keys are an integer `pair_index` for pairing values with the metadata
in the metadata_cds, and two pandas.Series, `observation_values` and
`forecast_values`.
metadata_cds : bokeh.models.ColumnDataSource
This cds has the following keys:
- `pair_index`: Integer for pairing metadata with the values in the value_cds.
- `observation_name`: Observation name.
- `forecast_name`: Forecast name.
- `interval_label`: Interval label of the processed forecast and observation data.
- `observation_hash`: Hash of the original observation object and the `datamodel.ProcessedForecastObservations` metadata.
- `forecast_hash`: Hash of the original forecast object and the `datamodel.ProcessedForecastObservations` metadata.
""" # NOQA
value_frames = []
meta_rows = []
for idx, pfxobs in enumerate(
report.raw_report.processed_forecasts_observations):
value_frame_dict = {
'pair_index': idx,
'observation_values': pfxobs.observation_values,
'forecast_values': pfxobs.forecast_values,
}
meta_row_dict = {
'pair_index': idx,
'observation_name': _obs_name(pfxobs.original),
'forecast_name': _fx_name(pfxobs.original),
'interval_label': pfxobs.interval_label,
'observation_hash': str(hash(
(pfxobs.original.data_object,
pfxobs.interval_length,
pfxobs.interval_value_type,
pfxobs.interval_label))),
'forecast_hash': str(hash(
(pfxobs.original.forecast,
pfxobs.interval_length,
pfxobs.interval_value_type,
pfxobs.interval_label))),
'observation_color': _obs_color(
pfxobs.interval_length)
}
value_frames.append(pd.DataFrame(value_frame_dict))
meta_rows.append(meta_row_dict)
data = pd.concat(value_frames)
metadata = pd.DataFrame(meta_rows)
from itertools import product
from string import ascii_uppercase
import pandas as pd
from pandas.tseries.offsets import MonthBegin
from .futures import CMES_CODE_TO_MONTH
def make_rotating_equity_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime,
exchange='TEST'):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
        Create a new asset every `frequency` * `periods_between_starts`.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': exchange,
},
index=range(num_assets),
)
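# Usage sketch (not part of the original module; values are illustrative):
#
#   import pandas as pd
#   info = make_rotating_equity_info(
#       num_assets=5,
#       first_start=pd.Timestamp('2015-01-05'),
#       frequency=pd.offsets.BDay(),
#       periods_between_starts=2,
#       asset_lifetime=4,
#   )
#   # -> 5 rows; asset i starts 2*i business days after 2015-01-05 and
#   #    ends 4 business days after its own start.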
def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None,
exchange='TEST'):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
    start_date : pd.Timestamp
        The date on which every asset starts trading.
    end_date : pd.Timestamp
        The date on which every asset stops trading.
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': exchange,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
)
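# Usage sketch (not part of the original module; values are illustrative):
#
#   import pandas as pd
#   info = make_simple_equity_info(
#       sids=[1, 2, 3],
#       start_date=pd.Timestamp('2015-01-01'),
#       end_date=pd.Timestamp('2015-12-31'),
#   )
#   # -> symbols 'A', 'B', 'C' named 'A INC.', 'B INC.', 'C INC.' on 'TEST'.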
def make_simple_multi_country_equity_info(countries_to_sids,
countries_to_exchanges,
start_date,
end_date):
"""Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`, from multiple countries.
"""
sids = []
symbols = []
exchanges = []
for country, country_sids in countries_to_sids.items():
exchange = countries_to_exchanges[country]
for i, sid in enumerate(country_sids):
sids.append(sid)
symbols.append('-'.join([country, str(i)]))
exchanges.append(exchange)
return pd.DataFrame(
{
'symbol': symbols,
'start_date': start_date,
'end_date': end_date,
'asset_name': symbols,
'exchange': exchanges,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
)
def make_jagged_equity_info(num_assets,
start_date,
first_end,
frequency,
periods_between_ends,
auto_close_delta):
"""
Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
    periods_between_ends : int
        Starting after the first end date, end each asset every
        `frequency` * `periods_between_ends`.
    auto_close_delta : offset or None
        Offset added to each asset's end_date to compute its auto_close_date.
        Pass None to omit the auto_close_date column.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
frame = pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'start_date': start_date,
'end_date': pd.date_range(
first_end,
freq=(periods_between_ends * frequency),
periods=num_assets,
),
'exchange': 'TEST',
},
index=range(num_assets),
)
# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
frame['auto_close_date'] = frame['end_date'] + auto_close_delta
return frame
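# Usage sketch (not part of the original module; values are illustrative):
#
#   import pandas as pd
#   info = make_jagged_equity_info(
#       num_assets=3,
#       start_date=pd.Timestamp('2015-01-05'),
#       first_end=pd.Timestamp('2015-01-09'),
#       frequency=pd.offsets.BDay(),
#       periods_between_ends=2,
#       auto_close_delta=pd.Timedelta(days=1),
#   )
#   # -> all assets start on 2015-01-05; asset i ends 2*i business days
#   #    after 2015-01-09 and auto-closes one day after its end_date.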
def make_future_info(first_sid,
root_symbols,
years,
notice_date_func,
expiration_date_func,
start_date_func,
month_codes=None,
multiplier=500):
"""
    Create a DataFrame representing futures for `root_symbols` during `years`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
    start_date_func : (Timestamp) -> Timestamp
        Function to generate start dates from first of the month associated
        with each asset month code.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CMES_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter.
"""
if month_codes is None:
month_codes = CMES_CODE_TO_MONTH
year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
# Pairs of string/date like ('K06', 2006-05-01) sorted by year/month
# `MonthBegin(month_num - 1)` since the year already starts at month 1.
contract_suffix_to_beginning_of_month = tuple(
(month_code + year_str[-2:], year + MonthBegin(month_num - 1))
for ((year, year_str), (month_code, month_num))
in product(
zip(years, year_strs),
sorted(list(month_codes.items()), key=lambda item: item[1]),
)
)
contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
contracts.append({
'sid': sid,
'root_symbol': root_sym,
'symbol': root_sym + suffix,
'start_date': start_date_func(month_begin),
'notice_date': notice_date_func(month_begin),
'expiration_date': expiration_date_func(month_begin),
'multiplier': multiplier,
'exchange': "TEST",
})
    return pd.DataFrame.from_records(contracts, index='sid')
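# Usage sketch (not part of the original module; the date offsets are illustrative):
#
#   import pandas as pd
#   futures = make_future_info(
#       first_sid=1,
#       root_symbols=['CL'],
#       years=[2014, 2015],
#       notice_date_func=lambda dt: dt - pd.Timedelta(days=10),
#       expiration_date_func=lambda dt: dt - pd.Timedelta(days=5),
#       start_date_func=lambda dt: dt - pd.DateOffset(years=1),
#   )
#   # -> one contract per (root symbol, year, month code), e.g. 'CLF14'.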
from functools import reduce
import pandas_profiling
from pandas import read_csv, read_table, merge, concat
def fn_to_df_(filename, from_='raw_datasets', samples=0, describe=False):
fn = f'{from_}/{filename}'
if '.csv' in filename:
df = read_csv(fn)
elif '.xyz' in filename:
        df = read_table(fn, skiprows=5, sep=r'\s+', dtype=float,
names=['X', 'Y', 'Z', filename.split('.')[0]])
if samples:
df = df.sample(samples)
if describe:
dfd = df.describe()
dfd.insert(0, 'Stat', dfd.index)
return df, dfd
return df
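# Usage sketch (not part of the original module; file names are hypothetical):
#
#   df = fn_to_df_('wells.csv', samples=500)
#   df, stats = fn_to_df_('elevation.xyz', describe=True)  # also returns describe() output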
def gen_profile_from_df(df, filename):
pf = pandas_profiling.ProfileReport(df)
pf.to_file(f'templates/{filename}.html')
def simple_merge(dfs, on_, how_):
return reduce(lambda left, right: merge(left, right, on=on_, how=how_), dfs)
def simple_concat(dfs):
    return concat(dfs)
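# Usage sketch (not part of the original module; the frames are hypothetical):
#
#   merged = simple_merge([df_a, df_b, df_c], on_=['X', 'Y'], how_='inner')
#   stacked = simple_concat([df_2019, df_2020])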
# Module deals with creation of ligand and receptor scores, and creation of scConnect tables etc.
import scConnect as cn
import scanpy as sc
version = cn.database.version
organism = cn.database.organism
# Scoring logic for ligands
def ligandScore(ligand, genes):
"""calculate ligand score for given ligand and gene set"""
from scipy.stats.mstats import gmean
import numpy as np
if ligand.ligand_type == "peptide" and isinstance(ligand.preprogene, str):
# check if multiple genes needs to be accounted for
if isinstance(eval(ligand.preprogene), list):
ligand_genes = list()
for gene in eval(ligand.preprogene):
try:
ligand_genes.append(genes[gene])
except KeyError:
#print(f"{gene} not found")
ligand_genes.append(0.0)
            # use max, as there might be many ortholog genes for one original
# gene and not all have to be expressed
try:
ligand_score = max(ligand_genes)
except ValueError:
print(f"something is wrong with the list {ligand_genes}")
ligand_score = 0.0
return ligand_score
elif ligand.ligand_type == "molecule":
synthesis = ligand.synthesis
transport = ligand.transport
reuptake = ligand.reuptake
excluded = ligand.excluded
# get geometric mean of synthesis genes (all need to be present)
if not isinstance(synthesis, str):
# If no genes are needed, synthesis is set to nan
synthesis = np.nan
else:
synthesis_expression = list()
for gene in eval(synthesis):
try:
synthesis_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
synthesis_expression.append(0.0)
synthesis = gmean(synthesis_expression)
# get maximum of vesicle transporters (only one is needed for molecule transport)
if not isinstance(transport, str):
# If no specific genes are needed, set transport to nan
transport = np.nan
else:
transport_expression = list()
for gene in eval(transport):
try:
transport_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
transport_expression.append(0.0)
transport = max(transport_expression)
# Get maximum of reuptake genes (only one is needed)
if not isinstance(reuptake, str):
# If no specific genes are needed, set reuptake to nan
reuptake = np.nan
else:
reuptake_expression = list()
for gene in eval(reuptake):
try:
reuptake_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
reuptake_expression.append(0.0)
reuptake = max(reuptake_expression)
        # get maximum among excluding genes, where any gene expression diverts to other ligands
if not isinstance(excluded, str):
# If no specific genes are needed, set excluded to 0
excluded = 0
else:
excluded_expression = list()
for gene in eval(excluded):
try:
excluded_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
excluded_expression.append(0.0)
excluded = max(excluded_expression)
        # return geometric mean of synthesis, transport and reuptake, corrected by the exclusion factor
promoting_factor = gmean(([x for x in [synthesis, transport, reuptake] if str(x) != "nan"])) # genes driving ligand production, remove nan values
if str(promoting_factor) == "nan": # capture cases where no promoting genes were present
print(f"no promoting genes detected for {ligand.ligand}")
return 0.0 # exit before running exclusion calculation
ligand_score = promoting_factor - excluded # correct ligand expression based on the exclusion factor
if ligand_score < 0: # ligand score should be 0 or positive
ligand_score = 0.0
return ligand_score
    # Ligand type is neither a peptide with preprogenes nor a molecule
else:
print("Big error! ligand type is not defined!")
return 0.0
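# Worked example (a sketch, not part of the original module): for a "molecule"
# ligand whose two synthesis genes score 4 and 9, whose best transporter scores 2,
# whose best reuptake gene scores 8, and with no exclusion genes expressed, the
# score is gmean([gmean([4, 9]), 2, 8]) - 0 = gmean([6, 2, 8]) ~= 4.58.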
def ligands(adata, organism=organism, select_ligands=None):
"""return a dataframe with ligand scores for each cluster.
.. note::
Needs a gene call dataframe under adata.uns.gene_call.
Use scConnect.genecall to create such dataframe
organism defaults to mouse, to use genes for other organism select this here.
    use select_ligands to only assess given ligands
(used by optimize_segregation to only check for gaba and glutamate)
Returns: Dict of ligand call for each cluster.
"""
import scConnect as cn
import pkg_resources
import pandas as pd
ligands = pd.read_csv(pkg_resources.resource_filename(
__name__, (f"data/Gene_annotation/{version}/{organism}/ligands.csv")))
if isinstance(select_ligands, list):
select = [True if ligand in select_ligands else False for ligand in ligands.ligand]
ligands = ligands[select]
ligand_df = pd.DataFrame(index=ligands.ligand)
for cluster, genes in adata.uns["gene_call"].items():
cluster_scores = list()
for ligand_data in ligands.iterrows():
ligand = ligand_data[1]
# fetch ligand score for specific ligand and gene set
ligand_score = ligandScore(ligand, genes)
cluster_scores.append(ligand_score)
ligand_df[cluster] = cluster_scores
adata.uns["ligands"] = ligand_df.to_dict()
return adata
# Scoring logic for receptors
def receptorScore(receptor, genes):
"""calculate receptor score given receptor and gene set"""
from scipy.stats.mstats import gmean
gene_expression = list()
for gene in eval(receptor.gene):
try:
gene_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
gene_expression.append(0.0)
# use max, as several genes might be found during ortholog search,
    # not all might be needed to create the receptor
gene_expression = max(gene_expression)
return gene_expression
def receptors(adata, organism=organism):
"""return a dataframe with receptor scores for each cluster.
.. note::
Needs a gene call dataframe under adata.uns.gene_call.
Use scConnect.genecall to create such dataframe.
Returns: Dict of receptor call for each cluster.
"""
import scConnect as cn
import pkg_resources
import pandas as pd
receptors = pd.read_csv(pkg_resources.resource_filename(
__name__, (f"data/Gene_annotation/{version}/{organism}/receptors.csv")))
receptor_df = pd.DataFrame(index=receptors.receptor)
for cluster, genes in adata.uns["gene_call"].items():
cluster_scores = list()
for receptor_data in receptors.iterrows():
receptor = receptor_data[1]
# fetch ligand score for specific ligand and gene set
receptor_score = receptorScore(receptor, genes)
cluster_scores.append(receptor_score)
receptor_df[cluster] = cluster_scores
adata.uns["receptors"] = receptor_df.to_dict()
return adata
# Interaction logic
def interactions(emitter, target, self_reference=True, organism=organism, corr_pval=True):
"""return an edge list of interactions between clusters.
If all connections are of interest, use the same data source for
emitter and target.
.. note::
self_reference is only valid when emitter == target.
.. note::
edge_list is returned as a list, and not in a adata object.
This is since multiple adata objects can be passed in to the
        function, and would lead to ambiguity about which object to append the edge_list to.
    Returns: List of edges between the given emitter and target clusters.
"""
import pkg_resources
import pandas as pd
from itertools import product
from scConnect.tools import printProgressBar
interactions = pd.read_csv(pkg_resources.resource_filename(
__name__, (f"data/Gene_annotation/{version}/interactions.csv")), index_col=[0, 1], sep=";")
interactions.sort_index(axis="index", inplace=True)
# Create a set of all possible index combinations.
# This is used to test if ligand receptor combination is present.
interaction_set = set(interactions.index)
# An edge list should contain u, v and d,
# where u is input node, v is output node
# and d is a dictionary with edge attributes.
edge_list = list()
# get all clusters
# NOTE: if the same cluster name is used in emitter and target datasets, they are
    # assumed to be the same cluster. Give your clusters unique names between your datasets.
try:
emitter_clusters = pd.DataFrame(emitter.uns["ligands"]).columns
target_clusters = pd.DataFrame(target.uns["ligands"]).columns
except KeyError:
print(
f"Please run connect.ligands() and connect.receptors() on your datasets first")
return
# Calculate total number of cluster combinations for the progress bar
if self_reference is True:
total_comb = len(list(product(emitter_clusters, target_clusters)))
else:
total_comb = len([(e, t) for (e, t) in product(
emitter_clusters, target_clusters) if e != t])
ligands = pd.DataFrame(emitter.uns["ligands"])
receptors = pd.DataFrame(target.uns["receptors"])
# load extra ligand and receptor statistics
ligands_zscore = pd.DataFrame(emitter.uns["ligands_zscore"])
receptors_zscore = pd.DataFrame(target.uns["receptors_zscore"])
if corr_pval:
ligands_pval = pd.DataFrame(emitter.uns["ligands_corr_pval"])
receptors_pval = pd.DataFrame(target.uns["receptors_corr_pval"])
else:
ligands_pval = pd.DataFrame(emitter.uns["ligands_pval"])
receptors_pval = pd.DataFrame(target.uns["receptors_pval"])
# Fetch receptor and ligand information
receptor_info = pd.read_csv(pkg_resources.resource_filename(
__name__, (f"data/Gene_annotation/{version}/{organism}/receptors.csv")), index_col=1)
receptor_info = receptor_info[["family", "gene"]]
ligand_info = pd.read_csv(pkg_resources.resource_filename(
__name__, (f"data/Gene_annotation/{version}/{organism}/ligands.csv")), index_col=1)
ligand_info = ligand_info[["ligand_type", "comment"]]
# Nested for-loop to get all combinations of
# interactions between clusters.
comb_tried = 0
for emitter_cluster in emitter_clusters:
for target_cluster in target_clusters:
# Are we interested in self referencing information?
# I leave that up to the user
if emitter_cluster != target_cluster or self_reference == True:
# Get only ligands and receptors expressed by the clusters
                # (speeds up iterative functions later)
emitter_ligands = ligands[emitter_cluster][ligands[emitter_cluster] > 0]
target_receptors = receptors[target_cluster][receptors[target_cluster] > 0]
connections = get_connections(
emitter_ligands,
target_receptors,
interactions,
interaction_set,
receptor_info,
ligand_info,
emitter_cluster,
target_cluster,
ligands_zscore,
ligands_pval,
receptors_zscore,
receptors_pval)
if len(connections) > 0:
for connection in connections:
edge_list.append(connection)
# Add the progress bar
comb_tried += 1
printProgressBar(
comb_tried, total_comb, prefix=f"finding connections between {len(emitter_clusters)} emitter clusters and {len(target_clusters)} target clusters")
return edge_list
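# Usage sketch (not part of the original module): once ligands(), receptors()
# and the shuffling-based specificity statistics have been attached to the
# AnnData objects, the edge list pairs naturally with nodes() to build a
# directed graph, e.g. with networkx:
#
#   import networkx as nx
#   edges = interactions(emitter=adata, target=adata)
#   G = nx.MultiDiGraph()
#   G.add_nodes_from(nodes([adata]))
#   G.add_edges_from(edges)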
# get all connections based on Ligands and receptors, and provide score for interactions
# Also provide meta data as a dictionary for interaction
def scale(value, from_range=(0, 1), to_range=(10E-100, 1)):  # mitigate log with 0
    # Linearly map `value` from `from_range` onto `to_range`.
    value = to_range[0] + (to_range[1] - to_range[0]) * (value - from_range[0]) / (from_range[1] - from_range[0])
    return value
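# Worked example (sketch): with the defaults, scale(0.0) returns 1e-100 and
# scale(1.0) returns 1.0, so downstream log10 calls never receive an exact zero.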
def get_connections(
ligands,
receptors,
interactions,
interaction_set,
receptor_info,
ligand_info,
emitter_cluster,
target_cluster,
ligands_zscore,
ligands_pval,
receptors_zscore,
receptors_pval):
"""finds connections between ligands and receptors
and return a score and metadata for each interaction"""
from scipy.stats.mstats import gmean
import numpy as np
# shorten the list of interactions to only contain relevant ligands.
# This should speed up the algorithm
ligand_filter = [True if ligand in ligands.keys() else False for ligand in interactions.index.get_level_values(0)]
interactions = interactions.loc[ligand_filter]
def interaction_specificity(l, r): # used to calculate interaction specificity score
sig = -np.log10((l+r)/2)
return sig
connections = list()
for ligand, l_score in ligands.iteritems():
for receptor, r_score in receptors.iteritems():
if (ligand, receptor) in interaction_set:
interaction = interactions.loc[ligand, receptor]
score = float(gmean((l_score, r_score)))
ligand_pval = float(ligands_pval[emitter_cluster][ligand])
receptor_pval = float(receptors_pval[target_cluster][receptor])
specificity = float(interaction_specificity(ligand_pval, receptor_pval))
log_score = float(np.log10(score + 1))
importance = specificity * log_score
connections.append((ligands.name, receptors.name, {
"score": float(score),
"log_score": log_score, # From here on, all values are +1ed and logaritmized with base of 10. # From here on, all values are +1ed and logaritmized with base of 10.
"ligand": str(ligand),
"ligand_zscore": float(ligands_zscore[emitter_cluster][ligand]),
"ligand_pval": ligand_pval,
"receptor": str(receptor),
"receptor_zscore": float(receptors_zscore[target_cluster][receptor]),
"receptor_pval": receptor_pval,
"interaction": f"{ligand} --> {receptor}",
"specificity": specificity,
"importance": importance,
"endogenous": f"{list(interaction.endogenous)}",
"action": f"{list(interaction.action)}",
"ligandspecies": f"{list(interaction.ligand_species)}",
"receptorspecies": f"{list(interaction.target_species)}",
"pubmedid": f"{list(interaction.pubmed_id)[:5]}",
"receptorfamily": str(receptor_info.loc[receptor]["family"]),
"receptorgene": str(receptor_info.loc[receptor]["gene"]),
"ligandtype": str(ligand_info.loc[ligand]["ligand_type"]),
"ligandcomment": str(ligand_info.loc[ligand]["comment"])}))
return connections
def nodes(adatas):
"""
Returns an list of nodes, attributes dictionary tuples.
Each tuple represent one node with an attribute dictionary:
*(cluster, dict(receptors: dict(receptor:score), ligands: dict(ligand:score) ))*
"""
if not isinstance(adatas, list):
adatas = [adatas, ]
nodes = []
for i, adata in enumerate(adatas):
print(f"precessing adata #{i+1}")
ligands_score = adata.uns["ligands"]
ligands_zscore = adata.uns["ligands_zscore"]
ligands_pval = adata.uns["ligands_pval"]
ligands_corr_pval = adata.uns["ligands_corr_pval"]
receptors_score = adata.uns["receptors"]
receptors_zscore = adata.uns["receptors_zscore"]
receptors_pval = adata.uns["receptors_pval"]
receptors_corr_pval = adata.uns["receptors_corr_pval"]
genes = adata.uns["gene_call"]
clusters = ligands_score.keys()
        # Keep only ligands and receptors with a positive score (drop non-expressed entries)
for cluster in clusters:
print(f"processing cluster {cluster}")
cluster_ligands_score = {k: v for k,
v in ligands_score[cluster].items() if v > 0}
cluster_receptors_score = {k: v for k,
v in receptors_score[cluster].items() if v > 0}
            # Add all information to the node dictionary
node = (cluster, {
"ligands_score": cluster_ligands_score,
"ligands_zscore": ligands_zscore[cluster],
"ligands_pval": ligands_pval[cluster],
"ligands_corr_pval": ligands_corr_pval[cluster],
"receptors_score": cluster_receptors_score,
"receptors_zscore": receptors_zscore[cluster],
"receptors_pval": receptors_pval[cluster],
"receptors_corr_pval": receptors_corr_pval[cluster],
"genes": genes[cluster]})
nodes.append(node)
return nodes
# Statistical inference of ligand and receptor scores.
# Here we shuffle the group annotations many times, calculate ligand and receptor scores,
# and find the mean and standard deviation of each ligand/receptor score for each group.
# We can then calculate the z-score of the true ligand/receptor score, p-values and corrected p-values.
# These data can be used to detect group-specific expression of ligands and receptors.
def _ligand_receptor_call(adata, groupby, organism, transformation, return_df = True):
import pandas as pd
adata = cn.genecall.meanExpression(adata, groupby=groupby, normalization=False, use_raw=False, transformation=transformation)
adata = cn.connect.ligands(adata, organism=organism)
adata = cn.connect.receptors(adata, organism=organism)
ligands = pd.DataFrame(adata.uns["ligands"])
    receptors = pd.DataFrame(adata.uns["receptors"])
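    # NOTE: the excerpt of this helper ends here. A plausible completion
    # (an assumption, not the verbatim upstream code) would honour the
    # `return_df` flag:
    if return_df:
        return ligands, receptors
    return adata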
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import numpy.linalg as LA
from scipy.sparse import csr_matrix
from sklearn.preprocessing import MinMaxScaler
def show_mtrx(m, title = None):
fig, ax = plt.subplots(figsize = (10, 5))
min_val = int(m.min())
max_val = int(m.max())
cax = ax.matshow(m, cmap=plt.cm.seismic)
fig.colorbar(cax, ticks=[min_val, int((min_val + max_val)/2), max_val])
plt.title(title)
plt.show()
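# Usage sketch (not part of the original module; the matrix is illustrative):
#
#   m = np.random.randn(10, 10)
#   show_mtrx(m, title='random 10x10 matrix')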
def plot_results(MADs, MSEs):
f, axes = plt.subplots(1, 2, figsize=(10, 5))
if len(MADs) == 3:
mad = {"K": list(range(2, 2 + len(MADs[0]))), "xTx": MADs[0], "zTz": MADs[1], "rTr": MADs[2]}
else:
mad = {"K": list(range(2, 2 + len(MADs[0]))), "xTx": MADs[0], "zTz": MADs[1]}
df_mad = pd.DataFrame(mad)
melted = df_mad.melt(id_vars="K", var_name="technique")
sns.barplot(x="K", y="value", hue="technique", data=melted, ax=axes[0])
if len(MADs) == 3:
mse = {"K": list(range(2, 2 + len(MSEs[0]))), "xTx": MSEs[0], "zTz": MSEs[1], "rTr": MSEs[2]}
else:
mse = {"K": list(range(2, 2 + len(MSEs[0]))), "xTx": MSEs[0], "zTz": MSEs[1]}
    df_mse = pd.DataFrame(mse)
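    # NOTE: the excerpt of this function ends here. A minimal completion
    # (an assumption) would mirror the MAD panel for the MSE data:
    melted_mse = df_mse.melt(id_vars="K", var_name="technique")
    sns.barplot(x="K", y="value", hue="technique", data=melted_mse, ax=axes[1])
    plt.show()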
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
        # Filling with a value that is not a category raises a TypeError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
        # GH 32624: Error when using a lot of indices to unstack.
# The error occurred only, if a lot of indices are used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
        # GH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
        # GH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
cols = right.columns[[1, 2, 3, 5]]
right[cols] = right[cols].astype(df["C"].dtype)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) MultiIndex bug
def test_unstack_nan_index5(self):
# GH9497 - multiple unstack with nulls
df = DataFrame(
{
"1st": [1, 2, 1, 2, 1, 2],
"2nd": date_range("2014-02-01", periods=6, freq="D"),
"jim": 100 + np.arange(6),
"joe": (np.random.randn(6) * 10).round(2),
}
)
df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
assert left.notna().values.sum() == 2 * len(df)
for col in ["jim", "joe"]:
for _, r in df.iterrows():
key = r["1st"], (col, r["2nd"], r["3rd"])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
ecols = MultiIndex.from_tuples([(t, "A")])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
tm.assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(
np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex,
)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(
df.columns.to_numpy(), names=df.columns.names
)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples(
[("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
names=["Upper", "Lower"],
)
for multiindex_columns in (
[0, 1, 2, 3, 4],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 1],
[0, 2],
[0, 3],
[0],
[2],
[4],
):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame(
[[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ["u", "x", "y", "z"]],
codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, "Lower"],
),
columns=Index(["B", "C"], name="Upper"),
dtype=df.dtypes[0],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("labels", [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"), ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"labels,data",
[
(list("xyz"), [10, 11, 12, 13, 14, 15]),
(list("zyx"), [14, 15, 12, 13, 10, 11]),
],
)
def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
# GH-36991
cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
midx = MultiIndex.from_product([cidx, cidx2])
df = DataFrame([sorted(data)], columns=midx)
result = df.stack([0, 1])
s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered)
expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2]))
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(["a", "a", "b", "c"])
df = DataFrame({"A": cat, "B": cat})
result = df.stack()
index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
expected = Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, columns",
[
([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
],
)
def test_stack_multi_columns_non_unique_index(self, index, columns):
# GH-28301
df = DataFrame(index=index, columns=columns).fillna(1)
stacked = df.stack()
new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
expected = DataFrame(
stacked.to_numpy(), index=new_index, columns=stacked.columns
)
tm.assert_frame_equal(stacked, expected)
stacked_codes = np.asarray(stacked.index.codes)
expected_codes = np.asarray(new_index.codes)
tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
df = DataFrame(
{
"A": pd.array([0, 1, None], dtype="Int64"),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = DataFrame(
[[3, 1, 2, 0]],
columns=MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = Series(["a", "b", "c", "a"], dtype="object")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value="d")
expected = DataFrame(
{"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = DataFrame(
{
"timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
"a": ["a"],
"b": ["b"],
"c": ["c"],
},
columns=["timestamp", "a", "b", "c"],
)
result = df.set_index(["a", "b"]).unstack()
expected = DataFrame(
[[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
index=Index(["a"], name="a"),
columns=MultiIndex(
levels=[["timestamp", "c"], ["b"]],
codes=[[0, 1], [0, 0]],
names=[None, "b"],
),
)
    tm.assert_frame_equal(result, expected)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path as op
import sys
import pandas as pd
import logging
#import simplejson as json
import yaml
from jcvi.apps.base import sh, mkdir
def get_gsize(fs):
cl = pd.read_csv(fs, sep="\t", header=None, names=['chrom','size'])
return sum(cl['size'])
def tsv2yml(args):
cvts = dict(genome=str,species=str,source=str)
gl = pd.read_csv(args.fi, sep="\t", header=0, converters=cvts,
true_values=['1','Y','Yes','T','True'],
false_values=['0','N','No','F','False'])
jd = dict()
for i in range(len(gl)):
genome, species, source, status = \
gl['genome'][i], gl['species'][i], gl['source'][i], gl['status'][i]
#if not status in ['C','T']: continue
jd1 = dict()
pre = "%s/%s" % (args.dirg, genome)
jd1['alias'] = gl['alias'][i]
jd1['prefix'] = gl['prefix'][i]
jd1['fasta'] = "%s/10.fasta" % pre
jd1['fasta_idx'] = "%s/10.fasta.fai" % pre
jd1['genome_bed'] = "%s/15_intervals/01.chrom.bed" % pre
jd1['genome_sizes'] = "%s/15_intervals/01.chrom.sizes" % pre
jd1['gap_bed'] = "%s/15_intervals/11.gap.bed" % pre
if op.isfile(jd1['genome_sizes']):
jd1['macs_gsize'] = get_gsize(jd1['genome_sizes'])
# annotation
jd1['gff'] = "%s/50_annotation/10.gff" % pre
jd1['gff_db'] = "%s/50_annotation/10.gff.db" % pre
jd1['gtf'] = "%s/50_annotation/10.gtf" % pre
jd1['bed'] = "%s/50_annotation/10.bed" % pre
jd1['fna'] = "%s/50_annotation/10.nt.fasta" % pre
jd1['faa'] = "%s/50_annotation/10.aa.fasta" % pre
jd1['tss'] = "%s/50_annotation/10.tss.bed" % pre
jd1['pgff'] = "%s/50_annotation/15.gff" % pre
jd1['pgff_db'] = "%s/50_annotation/15.gff.db" % pre
jd1['pgtf'] = "%s/50_annotation/15.gtf" % pre
jd1['pbed'] = "%s/50_annotation/15.bed" % pre
jd1['pfna'] = "%s/50_annotation/15.nt.fasta" % pre
jd1['pfaa'] = "%s/50_annotation/15.aa.fasta" % pre
if gl['blat'][i]:
jd1['blat'] = "%s/21_dbs/blat/db.2bit" % pre
if gl['gatk'][i]:
jd1['gatk'] = f"{pre}/21_dbs/gatk/"
if gl['star'][i]:
jd1['star'] = "%s/21_dbs/star/" % pre
if gl['hisat2'][i]:
jd1['hisat2'] = "%s/21_dbs/hisat2/db" % pre
if genome in ['Zmays_B73']:
jd1['hisat2'] = "%s/21_dbs/hisat2/B73_vt01/db" % pre
if gl['bwa'][i]:
jd1['bwa'] = "%s/21_dbs/bwa/db" % pre
if gl['bismark'][i]:
jd1['bismark'] = "%s/21_dbs/bismark" % pre
if gl['salmon'][i]:
jd1['salmon'] = "%s/21_dbs/salmon/db" % pre
jd1['tx2gene'] = "%s/21_dbs/salmon/tx2gene.csv" % pre
if gl['rcfg'][i]:
jd1['rcfg'] = "%s/55.rds" % pre
if gl['blast'][i]:
jd1['blastp'] = f"{pre}/21_dbs/blastp"
jd1['blastn'] = f"{pre}/21_dbs/blastn"
win11 = "%s/15_intervals/20.win11.tsv" % pre
win56 = "%s/15_intervals/20.win56.tsv" % pre
win127 = "%s/15_intervals/20.win127.tsv" % pre
if op.isfile(win11): jd1['win11'] = win11
if op.isfile(win56): jd1['win56'] = win56
if op.isfile(win127): jd1['win127'] = win127
jd1['fc_group_features'] = 'gene_id'
jd1['fc_group_features_type'] = 'gene_biotype'
jd[genome] = jd1
#j = dict(params = dict(genomes = jd))
j = dict(genomes = jd)
with open(args.fo, 'w') as outfile:
yaml.dump(j, outfile)
# with open(args.json, 'w') as outfile:
# json.dump(j, outfile)
def download(args):
cvts = dict(genome=str,species=str,source=str)
gl = pd.read_csv(args.cfg, sep="\t", header=0, converters=cvts,
true_values=['1','Y','Yes','T','True'],
false_values=['0','N','No','F','False'])
url_pre = "http://ftp.ebi.ac.uk/ensemblgenomes/pub"
for i in range(len(gl)):
if pd.isna(gl['status'][i]):
"""
Functions used for pre-processing
"""
#import math
import pickle
#import copy
#import config
import os
# for multiprocessing
from functools import partial
from multiprocessing import Pool, cpu_count
from joblib import Parallel, delayed
import joblib
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
def load_results(filename):
""" Load a pickle file
"""
with open(filename, 'rb') as file_to_load:
data = pickle.load(file_to_load, encoding='bytes')
return data
def save_results(rootpath, filename, results):
""" Save results as a pickle file
"""
if not os.path.exists(rootpath):
os.makedirs(rootpath)
with open(rootpath + filename, 'wb') as file_to_save:
pickle.dump(results, file_to_save)
########## Code to perform Principal Component Analysis (PCA) on a covariate ###################
def do_pca_on_covariate(df_train, df_test, n_components=10, location='pacific', var_id='sst'):
""" Do PCA: learn PC loadings from training data,
and project test data onto corresponding directions
Args:
df_train: multi-index (spatial-temporal) pandas dataframe
-- Training data used to compute Principal axes in feature space
df_test: multi-index pandas dataframe -- Test data
n_components: int -- Number of components to keep
location: str -- location indicator of the climate variable
var_id: str -- climate variable to process
Returns:
df1: pandas dataframe -- PCs for training data
df2: pandas dataframe -- PCs for test data
"""
# check the legitimate of the given parameters
if not isinstance(df_train, pd.DataFrame):
if isinstance(df_train, pd.Series):
df_train = df_train.to_frame() # convert pd.series to pd.dataframe
else:
raise ValueError("Training data needs to be a pandas dataframe")
if not isinstance(df_test, pd.DataFrame):
if isinstance(df_test, pd.Series):
df_test = df_test.to_frame() # convert pd.series to pd.dataframe
else:
raise ValueError("Test data needs to be a pandas dataframe")
# check dataframe level!
if len(df_train.index.names) < 3 or len(df_test.index.names) < 3:
raise ValueError("Multiindex dataframe includes 3 levels: [lat,lon,start_date]")
# flatten the dataframe such that the number of
# samples equals the number of dates in the dataframe
# and the number of features equals to lat x lon
df_train_flat = df_train.unstack(level=[0, 1])
df_test_flat = df_test.unstack(level=[0, 1])
x_train = df_train_flat.to_numpy()
x_test = df_test_flat.to_numpy()
# make sure no NAN
if np.isnan(x_train).sum() > 0:
np.nan_to_num(x_train, 0)
if np.isnan(x_test).sum() > 0:
np.nan_to_num(x_test, 0)
# Initialize the PCA model such that it will return the top n_components
pca = PCA(n_components=n_components)
# Fit the model with Xtrain and apply the dimensionality reduction on Xtrain.
pca_train = pca.fit_transform(x_train)
# Apply dimensionality reduction to Xtest
pca_test = pca.transform(x_test)
# Convert PCs of Xtrain and Xtest to pandas dataframe
col = ['{}_{}_pca_{}'.format(location, var_id, i) for i in range(n_components)]
df1 = pd.DataFrame(data=pca_train,
columns=col,
index=df_train_flat.index)
df2 = pd.DataFrame(data=pca_test,
columns=col,
index=df_test_flat.index)
return(df1, df2)
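# Illustrative usage sketch for do_pca_on_covariate(), not part of the original
# pipeline: the _demo_* helper, the 2x2 lat/lon grid and the random 'sst' values
# are assumptions made only to show the expected call pattern and output shapes.
def _demo_do_pca_on_covariate():
    lats, lons = [0.0, 1.0], [10.0, 11.0]
    dates = pd.date_range('1990-01-01', periods=8, freq='D')
    index = pd.MultiIndex.from_product([lats, lons, dates],
                                       names=['lat', 'lon', 'start_date'])
    toy = pd.DataFrame({'sst': np.random.RandomState(0).rand(len(index))},
                       index=index)
    train = toy.loc[pd.IndexSlice[:, :, dates[:6]], :]
    test = toy.loc[pd.IndexSlice[:, :, dates[6:]], :]
    # fit loadings on the training dates, project the held-out dates
    pcs_train, pcs_test = do_pca_on_covariate(train['sst'], test['sst'],
                                              n_components=2,
                                              location='pacific', var_id='sst')
    return pcs_train.shape, pcs_test.shape  # (6, 2) and (2, 2)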
def get_pca_from_covariate(rootpath,
data,
var_name, var_location,
train_start, train_end,
test_start, test_end,
n_components=10):
""" Apply PCA on spatial-temporal Climate variables (Covariates),
e.g., Sea surface temperature (SST)
Args:
data: multi-index pandas dataframe -- raw covariate to apply PCA
var_name: str -- covariate name
var_location: str -- covariate location (pacific, atlantic, us, and global)
rootpath: str -- directory to save the results
train_start, train_end: pd.Timestamp() -- the start date and the end date of the training set
test_start, test_end: pd.Timestamp() -- the start date and the end date of the test set
"""
idx = pd.IndexSlice
# check the legitimate of the given parameters
if not isinstance(data, pd.DataFrame):
if isinstance(data, pd.Series):
data = data.to_frame() # convert pd.series to pd.dataframe
else:
raise ValueError("Covariate needs to be a pandas multiindex dataframe")
# check if the train start date and the train end date is out of range
if train_start < data.index.get_level_values('start_date')[0]:
raise ValueError("Train start date is out of range!")
if train_end > data.index.get_level_values('start_date')[-1]:
raise ValueError("Train end date is out of range!")
# check if the test start date and the test end date is out of range
if test_start < train_start:
raise ValueError("Test start date is out of range!")
if test_end < train_end or test_end > data.index.get_level_values('start_date')[-1]:
raise ValueError("Test end date is out of range!")
print('create training-test split')
train_x = data.loc[idx[:, :, train_start:train_end], :]
test_x = data.loc[idx[:, :, test_start:test_end], :]
# start PCA
print('start pca')
train_x_pca, test_x_pca = do_pca_on_covariate(train_x[var_name], test_x[var_name],
n_components, var_location, var_name)
# save PCA data
all_x_pca = train_x_pca.append(test_x_pca)
all_x_pca.to_hdf(rootpath + '{}_{}_pca_all.h5'.format(var_location, var_name),
key=var_name, mode='w')
########## Code to perform z-score on a time-series using long-term mean and std ############################
def get_mean(df1, var_id='tmp2m', date_id='start_date'):
""" Compute the mean and standard deviation of a covariate on the given period
Args:
df1: multi-index pandas dataframe -- covariate
var_id: str -- covariate name
date_id: str -- index column name for date
Return(s):
df1: multi-index pandas dataframe -- with month-day-mean-std added
"""
indexnames = df1.index.names
idxlevel = indexnames.index(date_id)
df1 = df1.assign(month=df1.index.get_level_values(idxlevel).month)
df1 = df1.assign(day=df1.index.get_level_values(idxlevel).day)
# get mean of each date
df1['{}_daily_mean'.format(var_id)] = df1.groupby(['month', 'day'])[var_id].transform('mean')
# get std of each date
df1['{}_daily_std'.format(var_id)] = df1.groupby(['month', 'day'])[var_id].transform('std')
return df1.fillna(0)
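# Minimal sketch of get_mean() on a toy single-location series (values invented for
# illustration): the added '<var>_daily_mean'/'<var>_daily_std' columns hold the
# long-term mean/std of each calendar (month, day) across years.
def _demo_get_mean():
    dates = pd.date_range('2000-01-01', periods=4).append(
        pd.date_range('2001-01-01', periods=4))
    toy = pd.DataFrame({'tmp2m': np.arange(8, dtype=float)},
                       index=dates.rename('start_date'))
    out = get_mean(toy, var_id='tmp2m', date_id='start_date')
    # e.g. Jan-01 occurs in 2000 and 2001, so its daily mean is (0 + 4) / 2 = 2.0
    return out[['tmp2m', 'tmp2m_daily_mean', 'tmp2m_daily_std']]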
def add_month_day(df1, date_id='start_date'):
""" Extract the month-of-year and day-of-year from the date index,
and add it to the dataframe
Args:
df1: multi-index pandas dataframe -- covariate
date_id: str -- index column name for date
"""
indexnames = df1.index.names
idxlevel = indexnames.index(date_id)
df1 = df1.assign(month=df1.index.get_level_values(idxlevel).month)
df1 = df1.assign(day=df1.index.get_level_values(idxlevel).day)
return(df1)
def zscore_temporal(rootpath,
data,
var,
train_start='1986-01-01', train_end='2016-12-31',
test_start='2017-01-01', test_end='2018-12-31',
date_id='start_date'):
""" Do zscore on time series only (no spatial information), e.g., pca of a covariate
Args:
rootpath: directory to save the results
data: pd.Dataframe -- dataframe contains data that is about to apply zscore
var: str -- variable name
train_start, train_end: str -- the start date and the end date of the training set
test_start, test_end: str -- the start date and the end date of the test set
date_id: str -- index column name for date
"""
# check the legitimate of the given parameters
if not isinstance(data, pd.DataFrame) and not isinstance(data, pd.Series):
raise ValueError("Data needs to be a pandas dataframe/series.")
idx = pd.IndexSlice
target = data[var].to_frame()
print('pre-process: {}'.format(var))
df1 = target.loc[idx[train_start:train_end], :] # train
df2 = target.loc[idx[test_start:test_end], :] # test
df1 = get_mean(df1, var)
# get first element of each group: mean for each location each month-day
month_day = df1.groupby(['month', 'day']).first()
month_day = month_day.reset_index()
# add month-day column to second dataframe
df2 = add_month_day(df2)
df2.reset_index(level=0, inplace=True)
var_cols = ['{}_daily_{}'.format(var, col_type) for col_type in ['mean', 'std']]
# add mean and std get from df1
df2 = df2.merge(month_day[['month', 'day'] + var_cols], how='left', on=['month', 'day'])
df2 = df2.sort_values(by=[date_id])
df2 = df2.set_index([date_id]) # add multi-index back
df1[var + '_zscore'] = (df1[var] - df1['{}_daily_mean'.format(var)]) / df1['{}_daily_std'.format(var)]
df2[var + '_zscore'] = (df2[var] - df2['{}_daily_mean'.format(var)]) / df2['{}_daily_std'.format(var)]
df_all = df1.append(df2)
df_all.to_hdf(rootpath + '{}_zscore.h5'.format(var), key=var, mode='w')
def zscore_spatial_temporal(rootpath,
target, var_id='tmp2m',
train_start='1986-01-01', train_end='2016-12-31',
test_start='2017-01-01', test_end='2018-12-31',
date_id='start_date'):
""" Apply zscore on spatial-temporal climate variable, e.g., the target variable tmp2m
Args:
rootpath: directory to save the results
data: pd.Dataframe -- dataframe contains data that is about to apply zscore
var_id: variable name
train_start, train_end: str -- the start date and the end date of the training set
test_start, test_end: str -- the start date and the end date of the test set
date_id: column name for time/date
"""
idx = pd.IndexSlice
df1 = target.loc[idx[:, :, train_start:train_end], :] # train
df2 = target.loc[idx[:, :, test_start:test_end], :]# test
# ---- Day-Month Mean of each location ---- #
# Add 'month', 'day' column, and get mean and std of each date, each location
df1 = df1.groupby(['lat', 'lon']).apply(lambda df: get_mean(df, var_id, date_id))
# get first element of each group: mean for each location each month-day
month_day = df1.groupby(['lat', 'lon', 'month', 'day']).first()
month_day = month_day.reset_index()
# add month-day column to second dataframe
df2 = df2.groupby(['lat', 'lon']).apply(lambda df: add_month_day(df, date_id))
df2.reset_index(level=2, inplace=True)
var_cols = ['{}_daily_{}'.format(var_id, col_type) for col_type in ['mean', 'std']]
# add mean and std get from df1
df2 = df2.merge(month_day[['lat', 'lon', 'month', 'day'] + var_cols],
how='left', on=['lat', 'lon', 'month', 'day'])
df2 = df2.sort_values(by=['lat', 'lon', date_id])
df2 = df2.set_index(['lat', 'lon', date_id]) # add multi-index back
df1[var_id+'_zscore'] = (df1[var_id] - df1['{}_daily_mean'.format(var_id)])/df1['{}_daily_std'.format(var_id)]
df2[var_id+'_zscore'] = (df2[var_id] - df2['{}_daily_mean'.format(var_id)])/df2['{}_daily_std'.format(var_id)]
df_all = df1.append(df2)
df_all.sort_index(level=['lat', 'lon'], inplace=True)
df_all.to_hdf(rootpath + 'target_{}_multitask_zscore.h5'.format(var_id), key=var_id, mode='w')
############## train-validation split ##################
def create_sequence_custom(today, time_frame, covariate_map, past_years=2,
curr_shift_days=[7, 14, 28], past_shift_days=[7, 14, 28]):
""" Feature aggregation: add features from past dates
Args:
today: pd.Timestamp() -- the date we want to aggregate feature
time_frame: pandas dataframe -- corresponding dates for covariate map
covariate_map: numpy array -- data/feature we use to aggregate
past_years: int -- number of years in the past to be included
curr_shift_days: list of int -- past dates/neighbors in the current year/most recent year to be included
past_shift_days: list of int -- both past and future dates/neighbors in the past year to be included
Return:
agg_x: numpy array -- the aggregated feature for the date provided by "today"
"""
combine = [today] + [today - pd.DateOffset(days=day) for day in curr_shift_days]
for k in range(past_years): # go to the past k years
today = today - pd.DateOffset(years=1)
past = [today - pd.DateOffset(days=day) for day in past_shift_days]
future = [today + pd.DateOffset(days=day) for day in past_shift_days[::-1]]
time_index_next = future + [today] + past
combine = combine + time_index_next # combine.union(time_index_next)
combine.reverse() # reverse the sequence from oldest to newest
location = time_frame.loc[combine]
agg_x = covariate_map[location.values].squeeze()
return agg_x
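# Hedged sketch of how create_sequence_custom() is expected to be driven: `time_frame`
# maps each date to its row position inside `covariate_map`. The 4-year daily range,
# the 5 random features and the chosen date below are assumptions for the example only.
def _demo_create_sequence_custom():
    dates = pd.date_range('2010-01-01', '2013-12-31', freq='D')
    time_frame = pd.DataFrame({'pos': np.arange(len(dates))}, index=dates)
    covariate_map = np.random.RandomState(1).rand(len(dates), 5)
    agg = create_sequence_custom(pd.Timestamp('2013-06-01'), time_frame, covariate_map,
                                 past_years=2, curr_shift_days=[7, 14, 28],
                                 past_shift_days=[7, 14, 28])
    # 4 rows from the current year + 7 rows from each of 2 past years -> shape (18, 5)
    return agg.shape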
def get_test_train_index_seasonal(test_start, test_end, train_range=10, past_years=2, gap=28):
""" Construct train/test time index used to split training and test dataset
Args:
test_start, test_end: pd.Timestamp() -- the start date and the end date of the test set
train_range: int -- the length (years) to be included in the training set
past_years: int -- the length (years) of features in the past to be included
gap: int -- number of days between the date in X and date in y
Return:
test_start_shift: pd.Timestamp() -- new start date for test
after including # of years in the past
train_start_shift:pd.Timestamp() -- new start date for training
after including # of years in the past
train_time_index: list of pd.Timestamp() -- time index for training set
"""
test_start_shift = test_start - pd.DateOffset(years=train_range + past_years, days=gap)
# handles the train time indices
# you need to gap 28 days to predict Feb-01 standing on Jan-03
train_end = test_start - pd.DateOffset(days=gap)
# train starts 10 years before the end date
train_start = train_end - pd.DateOffset(years=train_range)
# shift another two years to create the sequence
train_start_shift = train_start- pd.DateOffset(years=past_years, days=gap)
train_time_index = pd.date_range(train_start, train_end)
return test_start_shift, train_start_shift, train_time_index
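# Quick sanity check of get_test_train_index_seasonal() for a hypothetical validation
# window starting 2017-01-01 (dates chosen only for illustration): training ends `gap`
# days before the validation start and spans `train_range` years.
def _demo_test_train_index():
    test_start = pd.Timestamp('2017-01-01')
    test_end = test_start + pd.DateOffset(days=28)
    test_start_shift, train_start_shift, train_time_index = \
        get_test_train_index_seasonal(test_start, test_end,
                                      train_range=10, past_years=2, gap=28)
    # training dates run from 2006-12-04 to 2016-12-04
    return train_time_index[0], train_time_index[-1], test_start_shift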
def train_val_split_target(rootpath,
target,
var_id,
val_year, val_month,
train_range=10,
past_years=2,
test_range=28,
test_freq='7D'):
""" Generate Train-validation sets on the target variable tmp2m
Args:
rootpath: str -- the directory to save the results
target: multi-index (spatial-temporal) pandas dataframe -- target data used
to construct training-validation set
var_id: str -- the name of the target variable, e.g., tmp2m and precip
val_year,val_month: int -- the year and the month for the validation set
train_range: int -- the length (years) to be included in the training set
past_years: int -- the length of features in the past to be included
test_range: int -- the length (days) used in the validation set
test_freq: str -- the frequency to generate dates in the validation set
"""
idx = pd.IndexSlice
# check the legitimate of the given parameters
if not isinstance(target, pd.DataFrame):
if isinstance(target, pd.Series):
target = target.to_frame() # convert pd.series to pd.dataframe
else:
raise ValueError("Dataset needs to be a pandas dataframe")
# check dataframe level!
if len(target.index.names) < 3:
raise ValueError("Multiindex dataframe includes 3 levels: [lat,lon,start_date]")
# handles the test time indices
test_start = pd.Timestamp('{}-{:02d}-01'.format(val_year, val_month), freq='D')
test_end = test_start + pd.DateOffset(days=test_range)
test_time_index = pd.date_range(test_start, test_end, freq=test_freq)
test_start_shift, train_start_shift, train_time_index = get_test_train_index_seasonal(test_start,
test_end,
train_range,
past_years)
train_end = train_time_index[-1]
# train_y = target['{}_zscore'.format(var_id)].to_frame().loc[idx[:, :, train_time_index], :]
# test_y = target['{}_zscore'.format(var_id)].to_frame().loc[idx[:, :, test_time_index], :]
train_y = target['target'].to_frame().loc[idx[:, :, train_time_index], :]
test_y = target['target'].to_frame().loc[idx[:, :, test_time_index], :]
train_y = train_y.unstack(level=[0, 1]).values
test_y = test_y.unstack(level=[0, 1]).values
save_results(rootpath, 'train_y_pca_{}_forecast{}.pkl'.format(val_year, val_month), train_y)
save_results(rootpath, 'val_y_pca_{}_forecast{}.pkl'.format(val_year, val_month), test_y)
def train_val_split_covariate(rootpath,
data,
val_year, val_month,
train_range=10, past_years=2,
test_range=28, test_freq='7D',
n_jobs=16):
# pylint: disable-msg=too-many-locals
""" Generate Train-validation sets for temporal covariates, e.g., PCs, climate indeces
Args:
rootpath: str -- the directory to save the results
data: pandas dataframe -- covariates used to construct training-validation set
val_year,val_month: int -- the year and the month for the validation set
train_range: int -- the length (years) to be included in the training set
past_years: int -- the length of features in the past to be included
test_range: int -- the length (days) used in the validation set
test_freq: str -- the frequency to generate dates in the validation set
n_jobs: int -- number of workers for parallel
"""
idx = pd.IndexSlice
# check the legitimate of the given parameters
if not isinstance(data, pd.DataFrame):
if isinstance(data, pd.Series):
data = data.to_frame() # convert pd.series to pd.dataframe
else:
raise ValueError("Dataset needs to be a pandas dataframe")
# check dataframe level!
if len(data.index.names) > 1:
raise ValueError("Pandas dataframe for temporal data only!")
# handles the test time indices
test_start = pd.Timestamp('{}-{:02d}-01'.format(val_year, val_month), freq='D')
test_end = test_start + pd.DateOffset(days=test_range)
# [test_start,test_end]
test_time_index = pd.date_range(test_start, test_end, freq=test_freq)
test_start_shift, train_start_shift, train_time_index = get_test_train_index_seasonal(test_start,
test_end,
train_range,
past_years)
train_end = train_time_index[-1]
train_x_norm = data.loc[idx[train_start_shift:train_end], :]
test_x_norm = data.loc[idx[test_start_shift:test_end], :]
time_index1 = pd.date_range(train_start_shift, train_end)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 7 17:09:57 2019n
@author: abhik
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#heatmap
df = pd.read_excel("Excel/Final_result.xlsx")
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'),
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import io
import tensorflow as tf
from PIL import Image
from utils import dataset_util #ImportError: No module named 'object_detection':copy object_detection/utils to folder
from collections import namedtuple, OrderedDict
def xml_to_csv(path):
xml_list = []
img_counter = 0
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
img_counter += 1
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
print('number of labels in csv file:',img_counter)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
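# Illustrative, self-contained run of xml_to_csv() on one hand-written Pascal-VOC style
# annotation; the temporary folder and the 'black' label are assumptions for the sketch,
# while the real annotations live under images/train and images/eval.
def _demo_xml_to_csv():
    import tempfile
    xml = ("<annotation><filename>img0.jpg</filename>"
           "<size><width>640</width><height>480</height><depth>3</depth></size>"
           "<object><name>black</name><pose>0</pose><truncated>0</truncated>"
           "<difficult>0</difficult>"
           "<bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>120</ymax></bndbox>"
           "</object></annotation>")
    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, 'img0.xml'), 'w') as fh:
            fh.write(xml)
        demo_df = xml_to_csv(tmp)
    return demo_df  # one row: img0.jpg, 640, 480, black, 10, 20, 110, 120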
def main():
for directory in ['train','eval']:
image_path = os.path.join(os.getcwd(), 'images/{}'.format(directory))
xml_df = xml_to_csv(image_path)
xml_df.to_csv('csv/{}_labels.csv'.format(directory), index=None)
#
#print('Done converting xml to csv. Number of images in tfrecord:')
#
main()
"""
Usage:
# From tensorflow/models/
# Create train data:
python3 generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=data/train.record
# Create test data:
python3 generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=data/test.record
"""
flags = tf.app.flags
flags.DEFINE_string('csv_input', '', 'Path to the CSV input')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
FLAGS = flags.FLAGS
# TO-DO replace this with label map
def class_text_to_int(row_label):
if row_label == 'black':
return 1
if row_label == 'white':
return 2
elif row_label == 'red':
return 3
else:
return None
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
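# Minimal sketch of what split() yields: one namedtuple per image filename, carrying all
# of that image's annotation rows. The three-row dataframe below is invented for the
# example; real rows come from the CSV produced by xml_to_csv.
def _demo_split():
    demo = pd.DataFrame({
        'filename': ['img0.jpg', 'img0.jpg', 'img1.jpg'],
        'width': [640, 640, 640], 'height': [480, 480, 480],
        'class': ['black', 'white', 'red'],
        'xmin': [10, 200, 30], 'ymin': [20, 210, 40],
        'xmax': [110, 300, 130], 'ymax': [120, 310, 140]})
    groups = split(demo, 'filename')
    # -> [('img0.jpg', 2), ('img1.jpg', 1)]
    return [(g.filename, len(g.object)) for g in groups]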
def create_tf_example(group, path):
with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(class_text_to_int(row['class']))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def main(_):
writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
path = os.path.join(os.getcwd(), 'images')
examples = pd.read_csv(FLAGS.csv_input)
###-----------###
### Importing ###
###-----------###
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import integrate
import seaborn as sns; sns.set()
###------------------###
### Helper Functions ###
###------------------###
## Time series management
def statal_timeseries(df, log=False):
'''
Returns a dataframe with the number of COVID cases where each row is indexed by a date (t0 = 2020-02-28), and each column is a state of Mexico.
If log=True, return the log of the cases.
'''
if log:
return np.log10( df.drop(['México'], axis=1).set_index('Fecha') )
else:
return df.drop(['México'], axis=1).set_index('Fecha')
def national_timeseries(df, log=False):
'''
Returns a dataframe with the national number of COVID cases for Mexico where each row is indexed by a date (t0 = 2020-02-28).
If log=True, return the log of the cases.
'''
if log:
return np.log10( df.set_index('Fecha').loc[:,['México']] )
else:
return df.set_index('Fecha').loc[:,['México']]
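# Sketch of the input layout the two helpers above expect: a 'Fecha' date column, one
# column per state and a national 'México' total. The tiny frame below is fabricated
# purely for illustration.
def _demo_timeseries_helpers():
    raw = pd.DataFrame({
        'Fecha': pd.date_range('2020-02-28', periods=3, freq='D'),
        'Ciudad de México': [1, 2, 4],
        'Jalisco': [0, 1, 1],
        'México': [1, 3, 5]})
    per_state = statal_timeseries(raw)      # date-indexed, one column per state
    national = national_timeseries(raw)     # date-indexed, single 'México' column
    return per_state.shape, national.shape  # (3, 2), (3, 1)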
# mean absolute error
def MAE(x,y): return np.mean(np.abs( x[:] - y[:len(x)] ))
### Solution helper functions
## Time series management
def Suceptibles(sol): return sol[:,0]
def Exposed(sol): return sol[:,1]
def Quarantined(sol): return sol[:,2]
def Asymptomatic(sol): return sol[:,3]
def Infected(sol): return sol[:,4]
def Hospitalized(sol): return sol[:,5]
def Recovered(sol): return sol[:,6]
def Deceased(sol): return sol[:,7]
def ActiveCases(sol): return Infected(sol) + Hospitalized(sol)
def TotalCases(sol): return Infected(sol) + Hospitalized(sol) + Recovered(sol) + Deceased(sol)
def ICUcases(sol): return Hospitalized(sol) + Deceased(sol)
## Aggregation
def CasesAggregation(sol_per_state, f=TotalCases): return np.sum( [f(sol) for sol in sol_per_state], axis=0 )
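# Hedged example of the aggregation helpers: `sol_per_state` is assumed to be a list with
# one solution array per state, each of shape (n_days, 8) ordered as
# [S, E, Q, A, I, H, R, D]; the random arrays stand in for real model output.
def _demo_cases_aggregation():
    rng = np.random.RandomState(2)
    sol_per_state = [rng.rand(30, 8) for _ in range(3)]
    total = CasesAggregation(sol_per_state, f=TotalCases)    # I + H + R + D, summed over states
    active = CasesAggregation(sol_per_state, f=ActiveCases)  # I + H, summed over states
    return total.shape, active.shape  # both (30,)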
# takes a set of solutions, aggregates them and saves them in a csv
def scenario_to_csv(filename, sol, initial_date, print_=False):
'''
Saves a return a single model output for a given scenario `sol` in a csv
that contains the dynamics of each compartiment in each column.
'''
try:
# our format
t0 = datetime.datetime.strptime(initial_date, '%Y-%m-%d')
except:
# John Hopkins format
t0 = datetime.datetime.strptime(initial_date, '%m/%d/%y')
# this is thought of as a list of arrays
# (t0 + datetime.timedelta(days=x)).strftime('%d-%m')
t_range = [t0 + datetime.timedelta(days=x) for x in range( sol[0].shape[0] )]
CSV = pd.DataFrame(columns=['Fecha','Totales','Infectados','Recuperados','Muertes','Hospitalizados'])
CSV['Totales'] = CasesAggregation(sol, f=TotalCases)
CSV['Recuperados'] = CasesAggregation(sol, f=Recovered)
CSV['Infectados'] = CasesAggregation(sol, f=Infected)
CSV['Muertes'] = CasesAggregation(sol, f=Deceased)
CSV['Hospitalizados'] = CasesAggregation(sol, f=Hospitalized)
CSV['Fecha'] = t_range
CSV.set_index('Fecha', inplace=True)
if print_:
print('Saved projections in {}'.format(filename))
CSV.to_csv(filename)
return CSV
def total_cases_scenarios_to_csv(filename, data, scenarios, initial_date, f=TotalCases, R0_index=''):
'''
Saves and returns a csv file where the first column 'Totales' presents the available COVID-19 data in
Mexico to date. The remaining columns are the fits+projections obtained with the model under different
containment scenarios.
'''
try:
# our format
t0 = datetime.datetime.strptime(initial_date, '%Y-%m-%d')
except:
# John Hopkins format
t0 = datetime.datetime.strptime(initial_date, '%m/%d/%y')
# this is thought of as a list of arrays
# (t0 + datetime.timedelta(days=x)).strftime('%d-%m')
t_range = [(t0 + datetime.timedelta(days=x)).strftime('%Y-%m-%d') for x in range( scenarios[0][0].shape[0] )]
CSV = pd.DataFrame(columns=['Fecha','Susana_00{}'.format(R0_index),'Susana_20{}'.format(R0_index),'Susana_50{}'.format(R0_index)])
CSV['Fecha'] = t_range
CSV.set_index('Fecha', inplace=True)
CSV.loc[t_range, 'Susana_00{}'.format(R0_index)] = CasesAggregation(scenarios[0], f=f).round().astype('int')
CSV.loc[t_range, 'Susana_20{}'.format(R0_index)] = CasesAggregation(scenarios[1], f=f).round().astype('int')
CSV.loc[t_range, 'Susana_50{}'.format(R0_index)] = CasesAggregation(scenarios[2], f=f).round().astype('int')
# Data = national_timeseries(data)
# Data['México'] = Data['México'].astype(int)
# CSV = Data.join(CSV, how='outer')
if filename is not None:
print('Saved projections in {}'.format(filename))
CSV.to_csv(filename)
return CSV
###--------------------###
### PLOTTING FUNCTIONS ###
###--------------------###
def plot_scenarios(filename, projections, data):
'''
Plots the projections of the model. `projections` is a pandas DataFrame whose columns contain the projection for each containment scenario together with its confidence-interval bounds.
`data` is the raw csv from DATA_URL_MEX and it contains the datapoints of the total confirmed cases in Mexico.
'''
plt.figure( figsize=(10,8) )
# containment scenario 1 (κ0 = 0.5)
plt.plot(projections['Susana_50'], lw=3, color='yellow', label=r'$50 \%$ Susana')
plt.fill_between(projections.index.values, projections['Susana_50_min'].values, projections['Susana_50_max'].values,
alpha=0.2, color='yellow');
# containment scenario 2 (κ0 = 0.2)
plt.plot(projections['Susana_20'], lw=3, color='red', label=r'$20 \%$ Susana')
plt.fill_between(projections.index.values, projections['Susana_20_min'].values, projections['Susana_20_max'].values,
alpha=0.2, color='red');
# no containment scenario (κ0 = 0)
plt.plot(projections['Susana_00'], lw=3, color='blue', label=r'$0 \%$ Susana')
plt.fill_between(projections.index.values, projections['Susana_00_min'].values, projections['Susana_00_max'].values,
alpha=0.2, color='blue');
## Plot total cases data (from 14-march on)
plt.plot(national_timeseries(data)['2020-03-14':], marker='o', ms=9, lw=0, color='black', alpha=0.7, label='datos')
## Plot attributes
# Susana_20 has shown to be the best fit to date (01-04-2020)
mae = MAE( national_timeseries(data)['México'], projections['Susana_20'] )
plt.title( 'Casos totales de COVID-19 en {}. MAE ({}) para la mejor proyección'.format( 'México', round(mae, 1) ) , size=16)
plt.ylabel('Número de casos', size=15);
plt.legend(loc='upper left')
plt.ylim(-50, np.minimum( projections['Susana_00'].max() * 1.25, projections['Susana_00_max'].max()) )
# plt.yscale('log')
plt.xticks(rotation=45)
plt.tight_layout()
## Saving results plot ##
# NOTE: The following warning may appear, but it does not affect the script:
# 'Source ID 8 was not found when attempting to remove it GLib.source_remove(self._idle_draw_id)'
if filename is not None:
plt.savefig(filename)
return plt.show()
###-------------------------###
### DIRTY FITTING FUNCTIONS ###
###-------------------------###
def solve_national(r, κ0, print_=False):
'''
Returns the aggregate national COVID-19 cases predicted by the model under containment scenario κ0, with a
proportion `r` of latent (unobserved exposed and asymptomatic) cases relative to the confirmed cases.
This function assumes that `tc`, `projection_horizon`, `n_days`, `states_mex`, `population_per_state_mex`,
`initial_date`, the epidemiological parameters and the state-level data frames are already defined in the script.
'''
## Preallocation ##
# Vector of solutions: it will collect the per-state results for the given containment scenario κ0
projections_κ0 = []
for (i, state) in enumerate(states_mex):
# population for state_i
N = population_per_state_mex[i]
# cases for state_i (from data)
confirmed = statal_timeseries(mex_confirmed).loc[initial_date, state]
deaths = statal_timeseries(mex_deaths).loc[initial_date, state]
recovered = statal_timeseries(mex_recovered).loc[initial_date, state]
## initial conditions of state_i setup ##
p = 1/2
R0 = recovered / N # fraction of recovered
D0 = deaths / N # fraction of deceased
I0 = ((confirmed/N) - R0 - D0) # fraction of confirmed infected cases
E0 = p*r * I0 # fraction of exposed non-infectious cases. Latent variable
A0 = (1-p)*r * I0 # fraction of asymptomatic but infectious cases. Latent variable
H0 = 0 # fraction of hospitalized cases. No data yet
CH0 = 0 # fraction of self-isolated cases. 0 if no prevention is made by the government
S0 = (1 - E0 - A0 - I0 - R0 - D0 - H0) # fraction of susceptible cases
# initial conditions of state_i
x0 = np.array([S0, E0, CH0, A0, I0, H0, R0, D0])
### MODEL SIMULATION FOR THE GIVEN CONTAINMENT SCENARIO ###
### Parameters ###
params = (β, k_avg, η, α, γI, μI, ν, γH, μH, κ0, σ, tc)
### run models for state_i ###
projection_state_i = solve(SEAIHRD_markov_step, x0, 0.0, n_days, *params) * N
if print_:
print('{} has a population of {} people'.format(state, N))
print('with', (I0 + R0 + D0 + H0)*N, 'total cases.' )
# Join model simulation for state_i in the vector of solutions
projections_κ0.append( projection_state_i )
# Return the total cases at a national level
return CasesAggregation( projections_κ0 , f=TotalCases)
## r minimization helper functions (dirty)
def f(r): return solve_national(r, 0.0, print_=False)
def cross_validation(data, r_range): return [MAE(data, f(r)) for r in r_range]
def r_min(r_range, mae_range): return r_range[mae_range.index(min(mae_range))]
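# A hypothetical usage of this grid search (the grid below is only a placeholder; the actual
# range and data series are chosen when the script is run):
#   r_range = list(np.arange(0.0, 20.0, 0.5))
#   mae_range = cross_validation(national_timeseries(mex_confirmed)['México'], r_range)
#   best_r = r_min(r_range, mae_range)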
###------------------###
### Model Definition ###
###------------------###
## Model helper functions
# average number of contacts at time t (reduced once the confinement at tc kicks in)
def k(t, k_avg, κ0, σ, tc): return (1 - κ0*Θ(t,tc))*k_avg + κ0*(σ - 1)*Θ(t,tc)
# probability that a susceptible individual gets infected in one day (at least one of its k(t)*(I+A) expected infectious contacts transmits)
def P(I,A, t, β, k_avg, κ0, σ, tc): return 1 - (1-β)**( k(t, k_avg, κ0, σ, tc)*(I+A) )
# Heaviside step function: Θ(t,tc) = 1 for t >= tc, 0 otherwise
def Θ(t,tc): return np.heaviside( t-tc, 1 )
# Kronecker delta: 1 only on the confinement day t == tc
def δ(t,tc):
if t == tc:
return 1
else:
return 0
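# Quick sanity check with hypothetical parameter values (k_avg = 10, κ0 = 0.5, σ = 2, tc = 30):
#   k(0, 10, 0.5, 2, 30)  -> 10.0  (before the confinement date the average degree is unchanged)
#   k(40, 10, 0.5, 2, 30) -> 5.5   ((1 - κ0)*k_avg + κ0*(σ - 1) once confinement is active)
# and with β = 0.05 and 1% of the population infectious (I + A = 0.01), the pre-confinement
# daily infection probability is P(0.01, 0.0, 0, 0.05, 10, 0.5, 2, 30) = 1 - 0.95**0.1 ≈ 0.0051.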
### Non-compartmental ###
## Discrete time Markovian model ##
def SEAIHRD_markov_step(x, t, S_tc, CH_tc, *params):
'''
Susceptible (S), Exposed (E), Asymptomatic (A), Infected (I), Hospitalized (H), Recovered (R), Deceased (D) epidemic model.
The function takes a single time step in the units of days.
When confinement is present, the model is no longer Markovian as the variables depend on the state of S_tc = S(tc) and CH_tc = CH(tc).
If tc = np.inf, then no confinement is made.
'''
β, k_avg, η, α, γI, μI, ν, γH, μH, *containment_params = params
κ0, σ, tc = containment_params
S,E,CH,A,I,H,R,D = x
return [S*(1 - P(I,A, t, β, k_avg, κ0, σ, tc)) * (1 - δ(t,tc)*κ0*CH_tc), # S(t+1)
S*P(I,A, t, β, k_avg, κ0, σ, tc) * (1 - δ(t,tc)*κ0*CH_tc) + (1-η)*E, # E(t+1)
S_tc * κ0 * CH_tc * Θ(t,tc), # CH(t+1)
η*E + (1-α)*A, # A(t+1)
α*A + (1 - (γI+μI+ν))*I, # I(t+1)
ν*I + (1 - (γH+μH))*H, # H(t+1)
γI*I + γH*H + R, # R(t+1)
μI*I + μH*H + D] # D(t+1)
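# Away from the confinement day (t != tc, so δ = 0) the first two updates reduce to the usual
# discrete-time form S(t+1) = S(t)*(1 - P(t)) and E(t+1) = S(t)*P(t) + (1 - η)*E(t); the extra
# (1 - δ(t,tc)*κ0*CH_tc) factor only removes, roughly, a κ0*CH_tc fraction of susceptibles
# into the confined compartment CH at t = tc.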
### Solver ###
def solve(f, x0, t0, n_steps, *params):
'''
Iterates the Markov chain defined by `f` for `n_steps` daily steps, starting from the state `x0` at time `t0`, and returns the trajectory as an (n_steps, len(x0)) array.
'''
xt = [xi for xi in x0]
sol = np.zeros( (n_steps, len(x0)) )
S_tc, CH_tc = 0, 0
(β, k_avg, η, α, γI, μI, ν, γH, μH, κ0, σ, tc) = params
t = t0
for i in range(n_steps):
if t == tc:
S,E,CH,A,I,H,R,D = xt
S_tc, CH_tc = S, (S + R)**σ
sol[i,:] = xt
xt = f(xt, t, S_tc, CH_tc, *params)
t += 1
return sol
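# The solver returns an array of shape (n_steps, len(x0)) whose columns match the solution
# helper functions defined above, so for a single state one can write, for example:
#   sol = solve(SEAIHRD_markov_step, x0, 0.0, n_days, *params)
#   Infected(sol)    # sol[:, 4], fraction of infectious symptomatic cases per day
#   TotalCases(sol)  # I + H + R + D, the cumulative-case fraction used in the fits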
if __name__ == "__main__":
## READING DATA ##
DATA_URL_MEX = 'https://raw.githubusercontent.com/Juancruzd/Mexico-datos/master/datos/series_de_tiempo/'
mex_confirmed = pd.read_csv(DATA_URL_MEX+'covid19_mex_casos_totales.csv', )
mex_deaths = pd.read_csv(DATA_URL_MEX+'covid19_mex_muertes.csv', )
mex_recovered = | pd.read_csv(DATA_URL_MEX+'covid19_mex_recuperados.csv', ) | pandas.read_csv |
from __future__ import unicode_literals
import copy
import io
import itertools
import json
import os
import shutil
import string
import sys
from collections import OrderedDict
from future.utils import iteritems
from unittest import TestCase
import pandas as pd
import pytest
from backports.tempfile import TemporaryDirectory
from tempfile import NamedTemporaryFile
from hypothesis import (
given,
HealthCheck,
reproduce_failure,
settings,
)
from hypothesis.strategies import (
dictionaries,
integers,
floats,
just,
lists,
text,
tuples,
)
from mock import patch, Mock
from oasislmf.model_preparation.manager import OasisManager as om
from oasislmf.model_preparation.pipeline import OasisFilesPipeline as ofp
from oasislmf.models.model import OasisModel
from oasislmf.utils.exceptions import OasisException
from oasislmf.utils.fm import (
unified_canonical_fm_profile_by_level_and_term_group,
)
from oasislmf.utils.metadata import (
OASIS_COVERAGE_TYPES,
OASIS_FM_LEVELS,
OASIS_KEYS_STATUS,
OASIS_PERILS,
OED_COVERAGE_TYPES,
OED_PERILS,
)
from ..models.fakes import fake_model
from ..data import (
canonical_accounts,
canonical_accounts_profile,
canonical_exposure,
canonical_exposure_profile,
canonical_oed_accounts,
canonical_oed_accounts_profile,
canonical_oed_exposure,
canonical_oed_exposure_profile,
fm_input_items,
gul_input_items,
keys,
oasis_fm_agg_profile,
oed_fm_agg_profile,
write_canonical_files,
write_canonical_oed_files,
write_keys_files,
)
class AddModel(TestCase):
def test_models_is_empty___model_is_added_to_model_dict(self):
model = fake_model('supplier', 'model', 'version')
manager = om()
manager.add_model(model)
self.assertEqual({model.key: model}, manager.models)
def test_manager_already_contains_a_model_with_the_given_key___model_is_replaced_in_models_dict(self):
first = fake_model('supplier', 'model', 'version')
second = fake_model('supplier', 'model', 'version')
manager = om(oasis_models=[first])
manager.add_model(second)
self.assertIs(second, manager.models[second.key])
def test_manager_already_contains_a_diferent_model___model_is_added_to_dict(self):
first = fake_model('first', 'model', 'version')
second = fake_model('second', 'model', 'version')
manager = om(oasis_models=[first])
manager.add_model(second)
self.assertEqual({
first.key: first,
second.key: second,
}, manager.models)
class DeleteModels(TestCase):
def test_models_is_not_in_manager___no_model_is_removed(self):
manager = om([
fake_model('supplier', 'model', 'version'),
fake_model('supplier2', 'model2', 'version2'),
])
expected = manager.models
manager.delete_models([fake_model('supplier3', 'model3', 'version3')])
self.assertEqual(expected, manager.models)
def test_models_exist_in_manager___models_are_removed(self):
models = [
fake_model('supplier', 'model', 'version'),
fake_model('supplier2', 'model2', 'version2'),
fake_model('supplier3', 'model3', 'version3'),
]
manager = om(models)
manager.delete_models(models[1:])
self.assertEqual({models[0].key: models[0]}, manager.models)
class GetCanonicalExposureProfile(TestCase):
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_canonical_exposure_profile()
self.assertEqual(None, profile)
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self, expected):
model = fake_model(resources={'canonical_exposure_profile_json': json.dumps(expected)})
profile = om().get_canonical_exposure_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_exposure_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
model = fake_model(resources={'canonical_exposure_profile_json': json.dumps(model_profile)})
profile = om().get_canonical_exposure_profile(oasis_model=model, canonical_exposure_profile_json=json.dumps(kwargs_profile))
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_exposure_profile'])
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path___models_profile_is_set_to_expected_json(self, expected):
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'canonical_exposure_profile_path': f.name})
profile = om().get_canonical_exposure_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_exposure_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_model_is_set_with_profile_json_path_and_profile_json_path_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(model_profile, model_file)
model_file.flush()
json.dump(kwargs_profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'canonical_exposure_profile_path': model_file.name})
profile = om().get_canonical_exposure_profile(oasis_model=model, canonical_exposure_profile_path=kwargs_file.name)
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_exposure_profile'])
class CreateModel(TestCase):
def create_model(
self,
lookup='lookup',
keys_file_path='key_file_path',
keys_errors_file_path='keys_error_file_path',
model_exposure_file_path='model_exposure_file_path'
):
model = fake_model(resources={'lookup': lookup})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file_path
model.resources['oasis_files_pipeline'].keys_errors_file_path = keys_errors_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
return model
@given(
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_file_path=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_file_path=text(min_size=1, alphabet=string.ascii_letters),
exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_supplier_and_model_and_version_only_are_supplied___correct_model_is_returned(
self,
lookup,
keys_file_path,
keys_errors_file_path,
exposure_file_path
):
model = self.create_model(lookup=lookup, keys_file_path=keys_file_path, keys_errors_file_path=keys_errors_file_path, model_exposure_file_path=exposure_file_path)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_file_path, 1, keys_errors_file_path, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(oasis_model=model)
oklf_mock.assert_called_once_with(
lookup,
keys_file_path,
errors_fp=keys_errors_file_path,
model_exposure_fp=exposure_file_path
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_file_path)
self.assertEqual(res_keys_file_path, keys_file_path)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_file_path)
self.assertEqual(res_keys_errors_file_path, keys_errors_file_path)
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version=text(alphabet=string.ascii_letters, min_size=1),
model_lookup=text(min_size=1, alphabet=string.ascii_letters),
model_keys_fp=text(alphabet=string.ascii_letters, min_size=1),
model_keys_errors_fp=text(alphabet=string.ascii_letters, min_size=1),
model_exposure_fp=text(alphabet=string.ascii_letters, min_size=1),
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_fp=text(alphabet=string.ascii_letters, min_size=1),
keys_errors_fp=text(alphabet=string.ascii_letters, min_size=1),
exposure_fp=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version,
model_lookup,
model_keys_fp,
model_keys_errors_fp,
model_exposure_fp,
lookup,
keys_fp,
keys_errors_fp,
exposure_fp
):
resources={
'lookup': model_lookup,
'keys_file_path': model_keys_fp,
'keys_errors_file_path': model_keys_errors_fp,
'model_exposure_file_path': model_exposure_fp
}
model = om().create_model(supplier_id, model_id, version, resources=resources)
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_fp, 1, keys_errors_fp, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(
oasis_model=model,
lookup=lookup,
model_exposure_file_path=exposure_fp,
keys_file_path=keys_fp,
keys_errors_file_path=keys_errors_fp
)
oklf_mock.assert_called_once_with(
lookup,
keys_fp,
errors_fp=keys_errors_fp,
model_exposure_fp=exposure_fp
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_fp)
self.assertEqual(res_keys_file_path, keys_fp)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_fp)
self.assertEqual(res_keys_errors_file_path, keys_errors_fp)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources, model.resources)
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_relative_oasis_files_path_only_are_supplied___correct_model_is_returned_with_absolute_oasis_file_path(
self,
supplier_id,
model_id,
version_id,
oasis_files_path
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
oasis_files_path = oasis_files_path.lstrip(os.path.sep)
resources={'oasis_files_path': oasis_files_path}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertTrue(os.path.isabs(model.resources['oasis_files_path']))
self.assertEqual(os.path.abspath(resources['oasis_files_path']), model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_relative_oasis_files_path_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'oasis_files_path': oasis_files_path, 'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertTrue(os.path.isabs(model.resources['oasis_files_path']))
self.assertEqual(os.path.abspath(resources['oasis_files_path']), model.resources['oasis_files_path'])
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'oasis_files_path': os.path.abspath(oasis_files_path), 'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['oasis_files_path'], model.resources['oasis_files_path'])
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_source_accounts_file_path_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
source_accounts_file_path
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'source_accounts_file_path': source_accounts_file_path}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
self.assertIsNone(model.resources.get('canonical_accounts_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'source_accounts_file_path': source_accounts_file_path, 'canonical_accounts_profile': canonical_accounts_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1)),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_canonical_exposure_profile_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
canonical_exposure_profile,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={
'canonical_exposure_profile': canonical_exposure_profile,
'source_accounts_file_path': source_accounts_file_path,
'canonical_accounts_profile': canonical_accounts_profile
}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['canonical_exposure_profile'], model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1)),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_and_canonical_exposure_profile_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={
'oasis_files_path': os.path.abspath(oasis_files_path),
'canonical_exposure_profile': canonical_exposure_profile,
'source_accounts_file_path': source_accounts_file_path,
'canonical_accounts_profile': canonical_accounts_profile
}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['oasis_files_path'], model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['canonical_exposure_profile'], model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
class LoadCanonicalAccountsProfile(TestCase):
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_canonical_accounts_profile()
self.assertEqual(None, profile)
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self, expected):
model = fake_model(resources={'canonical_accounts_profile_json': json.dumps(expected)})
profile = om().get_canonical_accounts_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_accounts_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
model = fake_model(resources={'canonical_accounts_profile_json': json.dumps(model_profile)})
profile = om().get_canonical_accounts_profile(oasis_model=model, canonical_accounts_profile_json=json.dumps(kwargs_profile))
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_accounts_profile'])
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path___models_profile_is_set_to_expected_json(self, expected):
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'canonical_accounts_profile_path': f.name})
profile = om().get_canonical_accounts_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_accounts_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path_and_profile_json_path_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(model_profile, model_file)
model_file.flush()
json.dump(kwargs_profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'canonical_accounts_profile_path': model_file.name})
profile = om().get_canonical_accounts_profile(oasis_model=model, canonical_accounts_profile_path=kwargs_file.name)
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_accounts_profile'])
class GetFmAggregationProfile(TestCase):
def setUp(self):
self.profile = oasis_fm_agg_profile
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_fm_aggregation_profile()
self.assertEqual(None, profile)
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self):
expected = self.profile
profile_json = json.dumps(self.profile)
model = fake_model(resources={'fm_agg_profile_json': profile_json})
profile = om().get_fm_aggregation_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(self):
model = fake_model(resources={'fm_agg_profile_json': json.dumps(self.profile)})
profile = om().get_fm_aggregation_profile(oasis_model=model, fm_agg_profile_json=json.dumps(self.profile))
self.assertEqual(self.profile, profile)
self.assertEqual(self.profile, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_path___models_profile_is_set_to_expected_json(self):
expected = self.profile
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'fm_agg_profile_path': f.name})
profile = om().get_fm_aggregation_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_path_and_profile_path_is_passed_through_kwargs___kwargs_profile_is_used(
self
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(self.profile, model_file)
model_file.flush()
json.dump(self.profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'fm_agg_profile_path': model_file.name})
profile = om().get_fm_aggregation_profile(oasis_model=model, fm_agg_profile_path=kwargs_file.name)
self.assertEqual(self.profile, profile)
self.assertEqual(self.profile, model.resources['fm_agg_profile'])
@pytest.mark.skipif(True, reason="CSV file transformations to be removed")
class TransformSourceToCanonical(TestCase):
@given(
source_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_to_canonical_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_exposure_validation_file_path=text(alphabet=string.ascii_letters),
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_not_set___parameters_are_taken_from_kwargs(
self,
source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path,
source_exposure_validation_file_path,
canonical_exposure_file_path
):
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_source_to_canonical(
source_exposure_file_path=source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path=source_to_canonical_exposure_transformation_file_path,
canonical_exposure_file_path=canonical_exposure_file_path
)
trans_mock.assert_called_once_with(
os.path.abspath(source_exposure_file_path),
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(source_to_canonical_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=True,
)
trans_call_mock.assert_called_once_with()
@given(
source_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_to_canonical_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_exposure_validation_file_path=text(alphabet=string.ascii_letters),
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_set___parameters_are_taken_from_model(
self,
source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path,
source_exposure_validation_file_path,
canonical_exposure_file_path):
model = fake_model(resources={
'source_exposure_file_path': source_exposure_file_path,
'source_exposure_validation_file_path': source_exposure_validation_file_path,
'source_to_canonical_exposure_transformation_file_path': source_to_canonical_exposure_transformation_file_path,
})
model.resources['oasis_files_pipeline'].canonical_exposure_path = canonical_exposure_file_path
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_source_to_canonical(
source_exposure_file_path=source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path=source_to_canonical_exposure_transformation_file_path,
canonical_exposure_file_path=canonical_exposure_file_path
)
trans_mock.assert_called_once_with(
os.path.abspath(source_exposure_file_path),
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(source_to_canonical_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=True
)
trans_call_mock.assert_called_once_with()
@pytest.mark.skipif(True, reason="CSV file transformations to be removed")
class TransformCanonicalToModel(TestCase):
@given(
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_to_model_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_exposure_validation_file_path=text(alphabet=string.ascii_letters),
model_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_not_set___parameters_are_taken_from_kwargs(
self,
canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path,
canonical_exposure_validation_file_path,
model_exposure_file_path):
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_canonical_to_model(
canonical_exposure_file_path=canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path=canonical_to_model_exposure_transformation_file_path,
model_exposure_file_path=model_exposure_file_path,
)
trans_mock.assert_called_once_with(
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(model_exposure_file_path),
os.path.abspath(canonical_to_model_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=False
)
trans_call_mock.assert_called_once_with()
@given(
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_to_model_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_exposure_validation_file_path=text(alphabet=string.ascii_letters),
model_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_set___parameters_are_taken_from_model(
self,
canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path,
canonical_exposure_validation_file_path,
model_exposure_file_path):
model = fake_model(resources={
'canonical_exposure_validation_file_path': canonical_exposure_validation_file_path,
'canonical_to_model_exposure_transformation_file_path': canonical_to_model_exposure_transformation_file_path,
})
model.resources['oasis_files_pipeline'].canonical_exposure_path = canonical_exposure_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_canonical_to_model(
canonical_exposure_file_path=canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path=canonical_to_model_exposure_transformation_file_path,
model_exposure_file_path=model_exposure_file_path,
)
trans_mock.assert_called_once_with(
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(model_exposure_file_path),
os.path.abspath(canonical_to_model_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=False
)
trans_call_mock.assert_called_once_with()
class GetKeys(TestCase):
def create_model(
self,
lookup='lookup',
keys_file_path='key_file_path',
keys_errors_file_path='keys_errors_file_path',
model_exposure_file_path='model_exposure_file_path'
):
model = fake_model(resources={'lookup': lookup})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file_path
model.resources['oasis_files_pipeline'].keys_errors_file_path = keys_errors_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
return model
@given(
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_file_path=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_file_path=text(min_size=1, alphabet=string.ascii_letters),
exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_supplied_kwargs_are_not___lookup_keys_files_and_exposures_file_from_model_are_used(
self,
lookup,
keys_file_path,
keys_errors_file_path,
exposure_file_path
):
model = self.create_model(lookup=lookup, keys_file_path=keys_file_path, keys_errors_file_path=keys_errors_file_path, model_exposure_file_path=exposure_file_path)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_file_path, 1, keys_errors_file_path, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(oasis_model=model)
oklf_mock.assert_called_once_with(
lookup,
keys_file_path,
errors_fp=keys_errors_file_path,
model_exposure_fp=exposure_file_path
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_file_path)
self.assertEqual(res_keys_file_path, keys_file_path)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_file_path)
self.assertEqual(res_keys_errors_file_path, keys_errors_file_path)
@given(
model_lookup=text(min_size=1, alphabet=string.ascii_letters),
model_keys_fp=text(min_size=1, alphabet=string.ascii_letters),
model_keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters),
model_exposure_fp=text(min_size=1, alphabet=string.ascii_letters),
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_fp=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters),
exposures_fp=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_and_kwargs_are_supplied___lookup_keys_files_and_exposures_file_from_kwargs_are_used(
self,
model_lookup,
model_keys_fp,
model_keys_errors_fp,
model_exposure_fp,
lookup,
keys_fp,
keys_errors_fp,
exposures_fp
):
model = self.create_model(lookup=model_lookup, keys_file_path=model_keys_fp, keys_errors_file_path=model_keys_errors_fp, model_exposure_file_path=model_exposure_fp)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_fp, 1, keys_errors_fp, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(
oasis_model=model,
lookup=lookup,
model_exposure_file_path=exposures_fp,
keys_file_path=keys_fp,
keys_errors_file_path=keys_errors_fp
)
oklf_mock.assert_called_once_with(
lookup,
keys_fp,
errors_fp=keys_errors_fp,
model_exposure_fp=exposures_fp
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_fp)
self.assertEqual(res_keys_file_path, keys_fp)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_fp)
self.assertEqual(res_keys_errors_file_path, keys_errors_fp)
class GetGulInputItems(TestCase):
def setUp(self):
self.profile = copy.deepcopy(canonical_exposure_profile)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=0),
keys=keys(size=2)
)
def test_no_fm_terms_in_canonical_profile__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
_p = copy.deepcopy(profile)
for _k, _v in iteritems(_p):
for __k, __v in iteritems(_v):
if 'FM' in __k:
profile[_k].pop(__k)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=0),
keys=keys(size=2)
)
def test_no_canonical_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(size=0)
)
def test_no_keys_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_items_dont_match_any_keys_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
l = len(exposures)
for key in keys:
key['id'] += l
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_profile_doesnt_have_any_tiv_fields__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
tivs = [profile[e]['ProfileElementName'] for e in profile if profile[e].get('FMTermType') and profile[e]['FMTermType'].lower() == 'tiv']
for t in tivs:
profile.pop(t)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=just(0.0),
size=2
),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_items_dont_have_any_positive_tivs__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=just(1.0),
size=2
),
keys=keys(
from_coverage_type_ids=just(OASIS_COVERAGE_TYPES['buildings']['id']),
from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
size=2
)
)
def test_only_buildings_coverage_type_in_exposure_and_model_lookup_supporting_single_peril_and_buildings_coverage_type__gul_items_are_generated(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
ufcp = unified_canonical_fm_profile_by_level_and_term_group(profiles=(profile,))
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
matching_canonical_and_keys_item_ids = set(k['id'] for k in keys).intersection([e['row_id'] for e in exposures])
gul_items_df, canexp_df = om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
get_canonical_item = lambda i: (
[e for e in exposures if e['row_id'] == i + 1][0] if len([e for e in exposures if e['row_id'] == i + 1]) == 1
else None
)
get_keys_item = lambda i: (
[k for k in keys if k['id'] == i + 1][0] if len([k for k in keys if k['id'] == i + 1]) == 1
else None
)
tiv_elements = (ufcp[1][1]['tiv'],)
fm_terms = {
1: {
'deductible': 'wscv1ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv1limit',
'share': None
}
}
for i, gul_it in enumerate(gul_items_df.T.to_dict().values()):
can_it = get_canonical_item(int(gul_it['canexp_id']))
self.assertIsNotNone(can_it)
keys_it = get_keys_item(int(gul_it['canexp_id']))
self.assertIsNotNone(keys_it)
positive_tiv_elements = [
t for t in tiv_elements if can_it.get(t['ProfileElementName'].lower()) and can_it[t['ProfileElementName'].lower()] > 0 and t['CoverageTypeID'] == keys_it['coverage_type']
]
for _, t in enumerate(positive_tiv_elements):
tiv_elm = t['ProfileElementName'].lower()
self.assertEqual(tiv_elm, gul_it['tiv_elm'])
tiv_tgid = t['FMTermGroupID']
self.assertEqual(can_it[tiv_elm], gul_it['tiv'])
ded_elm = fm_terms[tiv_tgid].get('deductible')
self.assertEqual(ded_elm, gul_it['ded_elm'])
ded_min_elm = fm_terms[tiv_tgid].get('deductible_min')
self.assertEqual(ded_min_elm, gul_it['ded_min_elm'])
ded_max_elm = fm_terms[tiv_tgid].get('deductible_max')
self.assertEqual(ded_max_elm, gul_it['ded_max_elm'])
lim_elm = fm_terms[tiv_tgid].get('limit')
self.assertEqual(lim_elm, gul_it['lim_elm'])
shr_elm = fm_terms[tiv_tgid].get('share')
self.assertEqual(shr_elm, gul_it['shr_elm'])
self.assertEqual(keys_it['area_peril_id'], gul_it['areaperil_id'])
self.assertEqual(keys_it['vulnerability_id'], gul_it['vulnerability_id'])
self.assertEqual(i + 1, gul_it['item_id'])
self.assertEqual(i + 1, gul_it['coverage_id'])
self.assertEqual(can_it['row_id'], gul_it['group_id'])
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=floats(min_value=1.0, allow_infinity=False),
from_tivs2=floats(min_value=2.0, allow_infinity=False),
from_tivs3=floats(min_value=3.0, allow_infinity=False),
from_tivs4=floats(min_value=4.0, allow_infinity=False),
size=2
),
keys=keys(
from_peril_ids=just(OASIS_PERILS['wind']['id']),
from_coverage_type_ids=just(OASIS_COVERAGE_TYPES['buildings']['id']),
from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
size=8
)
)
def test_all_coverage_types_in_exposure_and_model_lookup_supporting_multiple_perils_but_only_buildings_and_other_structures_coverage_types__gul_items_are_generated(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
ufcp = unified_canonical_fm_profile_by_level_and_term_group(profiles=(profile,))
exposures[1]['wscv2val'] = exposures[1]['wscv3val'] = exposures[1]['wscv4val'] = 0.0
keys[1]['id'] = keys[2]['id'] = keys[3]['id'] = 1
keys[2]['peril_id'] = keys[3]['peril_id'] = OASIS_PERILS['quake']['id']
keys[1]['coverage_type'] = keys[3]['coverage_type'] = OASIS_COVERAGE_TYPES['other']['id']
keys[4]['id'] = keys[5]['id'] = keys[6]['id'] = keys[7]['id'] = 2
keys[6]['peril_id'] = keys[7]['peril_id'] = OASIS_PERILS['quake']['id']
keys[5]['coverage_type'] = keys[7]['coverage_type'] = OASIS_COVERAGE_TYPES['other']['id']
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
matching_canonical_and_keys_item_ids = set(k['id'] for k in keys).intersection([e['row_id'] for e in exposures])
gul_items_df, canexp_df = om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
self.assertEqual(len(gul_items_df), 6)
self.assertEqual(len(canexp_df), 2)
tiv_elements = (ufcp[1][1]['tiv'], ufcp[1][2]['tiv'])
fm_terms = {
1: {
'deductible': 'wscv1ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv1limit',
'share': None
},
2: {
'deductible': 'wscv2ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv2limit',
'share': None
}
}
for i, gul_it in enumerate(gul_items_df.T.to_dict().values()):
can_it = canexp_df.iloc[gul_it['canexp_id']].to_dict()
keys_it = [k for k in keys if k['id'] == gul_it['canexp_id'] + 1 and k['peril_id'] == gul_it['peril_id'] and k['coverage_type'] == gul_it['coverage_type_id']][0]
positive_tiv_term = [t for t in tiv_elements if can_it.get(t['ProfileElementName'].lower()) and can_it[t['ProfileElementName'].lower()] > 0 and t['CoverageTypeID'] == keys_it['coverage_type']][0]
tiv_elm = positive_tiv_term['ProfileElementName'].lower()
self.assertEqual(tiv_elm, gul_it['tiv_elm'])
tiv_tgid = positive_tiv_term['FMTermGroupID']
self.assertEqual(can_it[tiv_elm], gul_it['tiv'])
ded_elm = fm_terms[tiv_tgid].get('deductible')
self.assertEqual(ded_elm, gul_it['ded_elm'])
ded_min_elm = fm_terms[tiv_tgid].get('deductible_min')
self.assertEqual(ded_min_elm, gul_it['ded_min_elm'])
ded_max_elm = fm_terms[tiv_tgid].get('deductible_max')
self.assertEqual(ded_max_elm, gul_it['ded_max_elm'])
lim_elm = fm_terms[tiv_tgid].get('limit')
self.assertEqual(lim_elm, gul_it['lim_elm'])
shr_elm = fm_terms[tiv_tgid].get('share')
self.assertEqual(shr_elm, gul_it['shr_elm'])
self.assertEqual(keys_it['area_peril_id'], gul_it['areaperil_id'])
self.assertEqual(keys_it['vulnerability_id'], gul_it['vulnerability_id'])
self.assertEqual(i + 1, gul_it['item_id'])
self.assertEqual(i + 1, gul_it['coverage_id'])
self.assertEqual(can_it['row_id'], gul_it['group_id'])
class GetFmInputItems(TestCase):
def setUp(self):
self.exposures_profile = copy.deepcopy(canonical_exposure_profile)
self.accounts_profile = copy.deepcopy(canonical_accounts_profile)
self.unified_canonical_profile = unified_canonical_fm_profile_by_level_and_term_group(
profiles=[self.exposures_profile, self.accounts_profile]
)
self.fm_agg_profile = copy.deepcopy(oasis_fm_agg_profile)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
accounts=canonical_accounts(size=1),
guls=gul_input_items(size=2)
)
def test_no_fm_terms_in_canonical_profiles__oasis_exception_is_raised(
self,
exposures,
accounts,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
_cep = copy.deepcopy(cep)
_cap = copy.deepcopy(cap)
for _k, _v in iteritems(_cep):
for __k, __v in iteritems(_v):
if 'FM' in __k:
cep[_k].pop(__k)
for _k, _v in iteritems(_cap):
for __k, __v in iteritems(_v):
if 'FM' in __k:
cap[_k].pop(__k)
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(accounts, accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
pd.DataFrame(data=guls),
cep,
cap,
accounts_file.name,
self.fm_agg_profile
)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
guls=gul_input_items(size=2)
)
def test_no_aggregation_profile__oasis_exception_is_raised(
self,
exposures,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
fmap = {}
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(canonical_accounts=[], canonical_accounts_file_path=accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
pd.DataFrame(data=guls),
cep,
cap,
accounts_file.name,
fmap
)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
guls=gul_input_items(size=2)
)
def test_no_canonical_accounts_items__oasis_exception_is_raised(
self,
exposures,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
fmap = copy.deepcopy(self.fm_agg_profile)
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(canonical_accounts=[], canonical_accounts_file_path=accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
pd.DataFrame(data=guls),
cep,
cap,
accounts_file.name,
fmap
)
class GulInputFilesGenerationTestCase(TestCase):
def setUp(self):
self.profile = canonical_exposure_profile
self.manager = om()
def check_items_file(self, gul_items_df, items_file_path):
expected = tuple(
{
k:it[k] for k in ('item_id', 'coverage_id', 'areaperil_id', 'vulnerability_id', 'group_id',)
} for _, it in gul_items_df.iterrows()
)
with io.open(items_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_coverages_file(self, gul_items_df, coverages_file_path):
expected = tuple(
{
k:it[k] for k in ('coverage_id', 'tiv',)
} for _, it in gul_items_df.iterrows()
)
with io.open(coverages_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_gulsummaryxref_file(self, gul_items_df, gulsummaryxref_file_path):
expected = tuple(
{
k:it[k] for k in ('coverage_id', 'summary_id', 'summaryset_id',)
} for _, it in gul_items_df.iterrows()
)
with io.open(gulsummaryxref_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
class FmInputFilesGenerationTestCase(TestCase):
def setUp(self):
self.exposures_profile = canonical_exposure_profile
self.accounts_profile = canonical_accounts_profile
self.unified_canonical_profile = unified_canonical_fm_profile_by_level_and_term_group(
profiles=(self.exposures_profile, self.accounts_profile,)
)
self.fm_agg_profile = oasis_fm_agg_profile
self.manager = om()
def check_fm_policytc_file(self, fm_items_df, fm_policytc_file_path):
fm_policytc_df = pd.DataFrame(
columns=['layer_id', 'level_id', 'agg_id', 'policytc_id'],
data=[key[:4] for key, _ in fm_items_df.groupby(['layer_id', 'level_id', 'agg_id', 'policytc_id', 'limit', 'deductible', 'share'])],
dtype=object
)
expected = tuple(
{
k:it[k] for k in ('layer_id', 'level_id', 'agg_id', 'policytc_id',)
} for _, it in fm_policytc_df.iterrows()
)
with io.open(fm_policytc_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_fm_profile_file(self, fm_items_df, fm_profile_file_path):
cols = ['policytc_id', 'calcrule_id', 'limit', 'deductible', 'deductible_min', 'deductible_max', 'attachment', 'share']
fm_profile_df = fm_items_df[cols]
fm_profile_df = pd.DataFrame(
columns=cols,
data=[key for key, _ in fm_profile_df.groupby(cols)]
)
col_repl = [
{'deductible': 'deductible1'},
{'deductible_min': 'deductible2'},
{'deductible_max': 'deductible3'},
{'attachment': 'attachment1'},
{'limit': 'limit1'},
{'share': 'share1'}
]
for repl in col_repl:
fm_profile_df.rename(columns=repl, inplace=True)
n = len(fm_profile_df)
fm_profile_df['index'] = range(n)
fm_profile_df['share2'] = fm_profile_df['share3'] = [0]*n
expected = tuple(
{
k:it[k] for k in ('policytc_id','calcrule_id','deductible1', 'deductible2', 'deductible3', 'attachment1', 'limit1', 'share1', 'share2', 'share3',)
} for _, it in fm_profile_df.iterrows()
)
with io.open(fm_profile_file_path, 'r', encoding='utf-8') as f:
result = tuple( | pd.read_csv(f) | pandas.read_csv |
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas
import arctern
def test_suite():
from multiprocessing import Process
import time
p1 = Process(target=ST_Intersection)
p2 = Process(target=ST_Equals)
p3 = Process(target=ST_Touches)
p4 = Process(target=ST_Overlaps)
p5 = Process(target=ST_Crosses)
p6 = Process(target=ST_Point)
p7 = Process(target=ST_Contains)
p8 = Process(target=ST_Intersects)
p9 = Process(target=ST_Distance)
p10 = Process(target=ST_DistanceSphere)
p11 = Process(target=ST_HausdorffDistance)
p12 = Process(target=ST_PolygonFromEnvelope)
start = time.time()
p1.start()
p2.start()
p3.start()
p4.start()
p5.start()
p6.start()
p7.start()
p8.start()
p9.start()
p10.start()
p11.start()
p12.start()
p1.join()
p2.join()
p3.join()
p4.join()
p5.join()
p6.join()
p7.join()
p8.join()
p9.join()
p10.join()
p11.join()
p12.join()
end = time.time()
print('Task runs %0.2f seconds.' % ((end - start)))
def ST_Intersection():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Intersection(data1, data2)
assert len(rst) == 40000000
def ST_Equals():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = | pandas.Series(arr2) | pandas.Series |
#!/usr/bin/env python3
# import numpy as np
import requests
import pandas as pd
import datetime
import json
# import matplotlib.pyplot as pp
import time
# import pymongo
import sys
import os
import sqlite3
MONGO_HOST = 'localhost'
MONGO_DB = 'TwStock'
MONGO_COLLETION = 'twse'
# from pymongo import MongoClient
def connect_mongo(): # connect to the MongoDB database
global collection
client = MongoClient(MONGO_HOST, 27017)
db = client[MONGO_DB]
collection = db[MONGO_COLLETION]
def transform_date(date): # convert a ROC (Minguo) calendar date to a Gregorian date
y, m, d = date.split('/')
return str(int(y)+1911) + '/' + m + '/' + d
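# Illustrative check (added, not in the original source): ROC year 109 corresponds to 2020,
# so transform_date('109/05/20') should return '2020/05/20'.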
def transform_data(data): # convert the field formats of one record fetched from TWSE
data[0] = datetime.datetime.strptime(transform_date(data[0]), '%Y/%m/%d')
data[1] = int(data[1].replace(',', '')) # strip the thousands separators
data[2] = int(data[2].replace(',', ''))
data[3] = float(data[3].replace(',', ''))
data[4] = float(data[4].replace(',', ''))
data[5] = float(data[5].replace(',', ''))
data[6] = float(data[6].replace(',', ''))
data[7] = float(0.0 if data[7].replace(',', '') == 'X0.00' else data[7].replace(',', '')) # +/-/X marks price up/down/no comparison
data[8] = int(data[8].replace(',', ''))
return data
def transform(data): # convert every record and build a new list
return [transform_data(d) for d in data]
def genYM(smonth, syear, emonth, eyear): # yield every (year, month) tuple from syear-smonth to eyear-emonth
start = 12 * syear + (smonth - 1) # offset by one month so that December maps to 12, not 0
end = 12 * eyear + (emonth - 1)
for num in range(int(start), int(end) + 1):
y, m = divmod(num, 12)
yield y, m + 1
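# Quick sanity check for the generator above (added for illustration, not in the original source):
# list(genYM(11, 2020, 2, 2021)) should yield (2020, 11), (2020, 12), (2021, 1), (2021, 2).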
def fetch_data(year: int, month: int, stockno): # fetch every trading-day record from year-month up to today
raw_data = []
today = datetime.datetime.today()
for year, month in genYM(month, year, today.month, today.year): # generate the year/month pairs from year-month to today for querying TWSE
if month < 10:
date = str(year) + '0' + str(month) + '01' # months 1 to 9
else:
date = str(year) + str(month) + '01' # months 10 to 12
data = get_stock_history(date, stockno)
for item in data: # take each daily record for stock number stockno
if collection.find({ # check whether this trading record is missing from the collection
"date": item[0],
"stockno": stockno
} ).count() == 0:
element={'date':item[0], 'stockno':stockno, 'shares':item[1], 'amount':item[2], 'open':item[3], 'close':item[4],
'high':item[5], 'low':item[6], 'diff':item[7], 'turnover':item[8]}; # build the document to insert into MongoDB
print(element)
collection.insert_one(element)
time.sleep(10) # pause 10 seconds; TWSE rate-limits by source IP and drops connections under heavy traffic
def get_stock_history(date, stock_no, retry = 5):
_date = datetime.date.today().strftime("%Y%m%d")
_proxies = {
'http': 'http://192.168.0.150:8080',
'https': 'http://192.168.0.150:8080',
}
_url = "http://www.twse.com.tw/exchangeReport/STOCK_DAY_ALL?response=open_data"
# _url = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?date=%s&stockNo=%s' % ( _date, stock_no)
_s_data = '/tmp/s.date'
if os.path.isfile(_s_data) is True:
_res_data = requests.get(_url)
# _res_data = requests.get(_url, proxies = _proxies)
_res_data = _res_data.text
with open(_s_data, 'w') as f:
f.write(_res_data)
else:
with open(_s_data, 'w') as f:
f.write('')
with open(_s_data, 'r') as f:
_line = f.readlines()
_RowDF = {}
_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 = [], [], [], [], [], [], [], [], [], []
for i in _line:
i = i.rstrip()
i = i.strip('"')
i = i.replace('","', ',')
i = i.split(',')
if len(i) == 10:
if i[0] == '證券代號' or i[4] == '':
pass
else:
_col0.append(i[0])
_col1.append(i[1])
_col2.append(int(i[2])//10000)
_col3.append(int(i[3])//10000)
_col4.append(float(i[4]))
_col5.append(float(i[5]))
_col6.append(float(i[6]))
_col7.append(float(i[7]))
_col8.append(float(i[8]))
_col9.append(int(i[9]))
else:
continue
_RowDF['證券代號'] = _col0
_RowDF['證券名稱'] = _col1
_RowDF['成交股數'] = _col2
_RowDF['成交金額'] = _col3
_RowDF['開盤價'] = _col4
_RowDF['最高價'] = _col5
_RowDF['最低價'] = _col6
_RowDF['收盤價'] = _col7
_RowDF['漲跌價差'] = _col8
_RowDF['成交筆數'] = _col9
pd.set_option('display.max_rows', 100)
df = | pd.DataFrame(_RowDF) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.spatial import Delaunay
from scipy.spatial.distance import cdist
from sklearn.linear_model import RANSACRegressor, LinearRegression
import ops.utils
def find_triangles(df):
v, c = get_vectors(df[['i', 'j']].values)
df_ = (pd.concat([
pd.DataFrame(v).rename(columns='V_{0}'.format),
pd.DataFrame(c).rename(columns='c_{0}'.format)], axis=1)
.assign(magnitude=lambda x: x.eval('(V_0**2 + V_1**2)**0.5'))
)
return df_
def nine_edge_hash(dt, i):
"""For triangle `i` in Delaunay triangulation `dt`, extract the vector
displacements of the 9 edges connected to at least one vertex of the
triangle.
Raises an error if triangle `i` lies on the outer boundary of the triangulation.
Example:
dt = Delaunay(X_0)
i = 0
segments, vector = nine_edge_hash(dt, i)
plot_nine_edges(X_0, segments)
"""
# indices of inner three vertices
# already in CCW order
a,b,c = dt.simplices[i]
# reorder so ab is the longest
X = dt.points
start = np.argmax((np.diff(X[[a, b, c, a]], axis=0)**2).sum(axis=1)**0.5)
if start == 0:
order = [0, 1, 2]
elif start == 1:
order = [1, 2, 0]
elif start == 2:
order = [2, 0, 1]
a,b,c = np.array([a,b,c])[order]
# outer three vertices
a_ix, b_ix, c_ix = dt.neighbors[i]
inner = {a,b,c}
outer = lambda xs: [x for x in xs if x not in inner][0]
# should be two shared, one new; if not, probably a weird edge simplex
# that shouldn't hash (return None)
try:
bc = outer(dt.simplices[dt.neighbors[i, order[0]]])
ac = outer(dt.simplices[dt.neighbors[i, order[1]]])
ab = outer(dt.simplices[dt.neighbors[i, order[2]]])
except IndexError:
return None
if any(x == -1 for x in (bc, ac, ab)):
error = 'triangle on outer boundary, neighbors are: {0} {1} {2}'
raise ValueError(error.format(bc, ac, ab))
# segments
segments = [
(a, b),
(b, c),
(c, a),
(a, ab),
(b, ab),
(b, bc),
(c, bc),
(c, ac),
(a, ac),
]
i = X[segments, 0]
j = X[segments, 1]
vector = np.hstack([np.diff(i, axis=1), np.diff(j, axis=1)])
return segments, vector
def plot_nine_edges(X, segments):
import matplotlib.pyplot as plt # imported locally, as in plot_alignments below, since the module has no global import
fig, ax = plt.subplots()
[(a, b),
(b, c),
(c, a),
(a, ab),
(b, ab),
(b, bc),
(c, bc),
(c, ac),
(a, ac)] = segments
for i0, i1 in segments:
ax.plot(X[[i0, i1], 0], X[[i0, i1], 1])
d = {'a': a, 'b': b, 'c': c, 'ab': ab, 'bc': bc, 'ac': ac}
for k,v in d.items():
i,j = X[v]
ax.text(i,j,k)
ax.scatter(X[:, 0], X[:, 1])
s = X[np.array(segments).flatten()]
lim0 = s.min(axis=0) - 100
lim1 = s.max(axis=0) + 100
ax.set_xlim([lim0[0], lim1[0]])
ax.set_ylim([lim0[1], lim1[1]])
return ax
def get_vectors(X):
"""Get the nine edge vectors and centers for all the faces in the
Delaunay triangulation of point array `X`.
"""
dt = Delaunay(X)
vectors, centers = [], []
for i in range(dt.simplices.shape[0]):
# skip triangles with an edge on the outer boundary
if (dt.neighbors[i] == -1).any():
continue
result = nine_edge_hash(dt, i)
# some rare event
if result is None:
continue
_, v = result
c = X[dt.simplices[i], :].mean(axis=0)
vectors.append(v)
centers.append(c)
return np.array(vectors).reshape(-1, 18), np.array(centers)
def nearest_neighbors(V_0, V_1):
Y = cdist(V_0, V_1, metric='sqeuclidean')
distances = np.sqrt(Y.min(axis=1))
ix_0 = np.arange(V_0.shape[0])
ix_1 = Y.argmin(axis=1)
return ix_0, ix_1, distances
def get_vc(df, normalize=True):
V,c = (df.filter(like='V').values,
df.filter(like='c').values)
if normalize:
V = V / df['magnitude'].values[:, None]
return V, c
def evaluate_match(df_0, df_1, threshold_triangle=0.3, threshold_point=2):
V_0, c_0 = get_vc(df_0)
V_1, c_1 = get_vc(df_1)
i0, i1, distances = nearest_neighbors(V_0, V_1)
# matching triangles
filt = distances < threshold_triangle
X, Y = c_0[i0[filt]], c_1[i1[filt]]
# minimum to proceed
if sum(filt) < 5:
return None, None, -1
# use matching triangles to define transformation
model = RANSACRegressor()
model.fit(X, Y)
rotation = model.estimator_.coef_
translation = model.estimator_.intercept_
# score transformation based on triangle i,j centers
distances = cdist(model.predict(c_0), c_1, metric='sqeuclidean')
# could use a fraction of the data range or nearest neighbor
# distances within one point set
threshold_region = 50
filt = np.sqrt(distances.min(axis=0)) < threshold_region
score = (np.sqrt(distances.min(axis=0))[filt] < threshold_point).mean()
return rotation, translation, score
def build_linear_model(rotation, translation):
m = LinearRegression()
m.coef_ = rotation
m.intercept_ = translation
return m
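# Usage sketch for the two helpers above (illustrative only, not part of the original module);
# df_0 and df_1 are assumed to come from find_triangles:
# rotation, translation, score = evaluate_match(df_0, df_1)
# if score > 0:
# model = build_linear_model(rotation, translation)
# Y_pred = model.predict(df_0.filter(like='c').values) # map df_0 triangle centers into df_1 coordinates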
def prioritize(df_info_0, df_info_1, matches):
"""Produces an Nx2 array of tile (site) identifiers that are predicted
to match within a search radius, based on existing matches.
Expects info tables to contain tile (site) identifier as index
and two columns of coordinates. Matches should be supplied as an
Nx2 array of tile (site) identifiers.
"""
a = df_info_0.loc[matches[:, 0]].values
b = df_info_1.loc[matches[:, 1]].values
model = RANSACRegressor()
model.fit(a, b)
# rank all pairs by distance
predicted = model.predict(df_info_0.values)
distances = cdist(predicted, df_info_1, metric='sqeuclidean')
ix = np.argsort(distances.flatten())
ix_0, ix_1 = np.unravel_index(ix, distances.shape)
candidates = list(zip(df_info_0.index[ix_0], df_info_1.index[ix_1]))
return remove_overlap(candidates, matches)
def remove_overlap(xs, ys):
ys = set(map(tuple, ys))
return [tuple(x) for x in xs if tuple(x) not in ys]
def brute_force_pairs(df_0, df_1):
from tqdm import tqdm_notebook as tqdn
arr = []
for site, df_s in tqdn(df_1.groupby('site'), 'site'):
def work_on(df_t):
rotation, translation, score = evaluate_match(df_t, df_s)
determinant = None if rotation is None else np.linalg.det(rotation)
result = pd.Series({'rotation': rotation,
'translation': translation,
'score': score,
'determinant': determinant})
return result
(df_0
.pipe(ops.utils.gb_apply_parallel, 'tile', work_on)
.assign(site=site)
.pipe(arr.append)
)
return (pd.concat(arr).reset_index()
.sort_values('score', ascending=False)
)
def parallel_process(func, args_list, n_jobs, tqdn=True):
from joblib import Parallel, delayed
work = args_list
if tqdn:
from tqdm import tqdm_notebook
work = tqdm_notebook(work, 'work')
return Parallel(n_jobs=n_jobs)(delayed(func)(*w) for w in work)
def merge_sbs_phenotype(df_sbs_, df_ph_, model):
X = df_sbs_[['i', 'j']].values
Y = df_ph_[['i', 'j']].values
Y_pred = model.predict(X)
threshold = 2
distances = cdist(Y, Y_pred, metric='sqeuclidean')
ix = distances.argmin(axis=1)
filt = np.sqrt(distances.min(axis=1)) < threshold
columns = {'site': 'site', 'cell_ph': 'cell_ph',
'i': 'i_ph', 'j': 'j_ph',}
cols_final = ['well', 'tile', 'cell', 'i', 'j',
'site', 'cell_ph', 'i_ph', 'j_ph', 'distance']
sbs = df_sbs_.iloc[ix[filt]].reset_index(drop=True)
return (df_ph_
[filt].reset_index(drop=True)
[list(columns.keys())]
.rename(columns=columns)
.pipe(lambda x: pd.concat([sbs, x], axis=1))
.assign(distance=np.sqrt(distances.min(axis=1))[filt])
[cols_final]
)
def plot_alignments(df_ph, df_sbs, df_align, site):
"""Filter for one well first.
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(10, 10))
X_0 = df_ph.query('site == @site')[['i', 'j']].values
ax.scatter(X_0[:, 0], X_0[:, 1], s=10)
it = (df_align
.query('site == @site')
.sort_values('score', ascending=False)
.iterrows())
for _, row in it:
tile = row['tile']
X = df_sbs.query('tile == @tile')[['i', 'j']].values
model = build_linear_model(row['rotation'], row['translation'])
Y = model.predict(X)
ax.scatter(Y[:, 0], Y[:, 1], s=1, label=tile)
print(tile)
ax.set_xlim([-50, 1550])
ax.set_ylim([-50, 1550])
return ax
def multistep_alignment(df_0, df_1, df_info_0, df_info_1,
initial_sites=8, batch_size=180):
"""Provide triangles from one well only.
"""
sites = ( | pd.Series(df_info_1.index) | pandas.Series |
"""
Classes and methods to load datasets.
"""
import numpy as np
import struct
from scipy.misc import imresize
from scipy import ndimage
import os
import os.path
import pandas as pd
import json
from collections import defaultdict
from pathlib import Path as pathlib_path
import pickle
'''
Contains helper methods and classes for loading each dataset.
'''
def sample(data, batch_size):
"""
Generic sampling function with uniform distribution.
data: numpy array or list of numpy arrays
batch_size: sample size
"""
if not isinstance(data, list):
idx = np.random.randint(len(data), size=batch_size)
return idx, data[idx],
else:
n = {len(x) for x in data}
assert len(n) == 1
n = n.pop()
idx = np.random.randint(n, size=batch_size)
return idx, tuple(x[idx] for x in data)
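# Minimal usage sketch (added for illustration): for a single array, `idx, batch = sample(X, 32)`
# returns the sampled row indices and rows; for paired arrays of equal length,
# `idx, (xb, yb) = sample([X, Y], 32)` draws the same rows from both.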
class MNIST(object):
"""
Class to load MNIST data.
"""
def __init__(self, ):
self.train_path = '../data/mnist_train'
self.test_path = '../data/mnist_test'
self.train_labels_path = self.train_path + '_labels'
self.test_labels_path = self.test_path + '_labels'
self.Xtr, self.ytr = self._get_data(self.train_path, self.train_labels_path)
self.Xte, self.yte = self._get_data(self.test_path, self.test_labels_path)
self.mu = np.mean(self.Xtr, axis=0)
self.sigma = np.std(self.Xtr, axis=0) + 1e-12
def train_set(self, ):
return self.Xtr, self.ytr
def test_set(self, ):
return self.Xte, self.yte
def sample(self, batch_size, dtype='train', binarize=True):
"""
Samples data from training or test set.
"""
_, (X, Y) = self._sample(dtype, batch_size)
if binarize:
X = self._binarize(X)
return X, Y
def _sample(self, dtype='train', batch_size=100):
"""
Samples data from training set.
"""
if dtype == 'train':
return sample([self.Xtr, self.ytr], batch_size)
elif dtype == 'test':
return sample([self.Xte, self.yte], batch_size)
else:
raise Exception('Training or test set not selected..')
def _binarize(self, data):
"""
Samples bernoulli distribution based on pixel intensities.
"""
return np.random.binomial(n=1, p=data)
def _get_data(self, data_path, labels_path):
"""
Reads MNIST data. Rescales image pixels to be between 0 and 1.
"""
data = self._read_mnist(data_path)
data = data / 255
labels = self._read_mnist(labels_path)
n = len(data)
data = data.reshape([n, -1])
return data, labels
def _read_mnist(self, path):
'''
Function to read MNIST data file, taken from
https://gist.github.com/tylerneylon/ce60e8a06e7506ac45788443f7269e40
'''
with open(path, 'rb') as file:
zero, dtype, dims = struct.unpack('>HBB', file.read(4))
shape = tuple(struct.unpack('>I', file.read(4))[0] for d in range(dims))
data = np.fromstring(file.read(), dtype=np.uint8)
return data.reshape(shape)
class JointMNIST(MNIST):
"""
MNIST data treated as two output variables consisting of the top halves and bottom halves of
each image.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples (remaining examples are split into one of top or bottom halves)
"""
super(JointMNIST, self).__init__() # load data
self.n_paired = n_paired
self.split_point = int(784 / 2)
# joint and missing split
_n = len(self.Xtr)
self.x_and_y = set(np.random.randint(_n, size=self.n_paired))
_remain = set(np.arange(_n)) - set(self.x_and_y)
_x_size = int(len(_remain) / 2)
self.x_only = set(np.random.choice(list(_remain), size=_x_size, replace=False))
self.y_only = set(np.array(list(_remain - set(self.x_only))))
def sample(self, batch_size, dtype='train', binarize=True, include_labels=False):
# sample naively
idx, (batch, labels) = self._sample(dtype, batch_size)
if binarize:
batch = self._binarize(batch)
# handle test set case separately
if dtype == 'test':
X = batch[:, 0:self.split_point]
Y = batch[:, self.split_point:]
if include_labels:
return (X, labels), (Y, labels)
else:
return X, Y
# separate indices into paired and missing (for training set)
x_idx = np.array(list(set(idx) & self.x_only))
x_idx = np.array([np.argwhere(idx == x)[0, 0] for x in x_idx], dtype=np.int32)
y_idx = np.array(list(set(idx) & self.y_only))
y_idx = np.array([np.argwhere(idx == x)[0, 0] for x in y_idx], dtype=np.int32)
xy_idx = np.array(list(set(idx) & self.x_and_y))
xy_idx = np.array([np.argwhere(idx == x)[0, 0] for x in xy_idx], dtype=np.int32)
# create separate arrays for jointly observed and marginal data
X = batch[x_idx, 0:self.split_point]
Y = batch[y_idx, self.split_point:]
X_joint = batch[xy_idx, 0:self.split_point]
Y_joint = batch[xy_idx, self.split_point:]
if include_labels: # split label data too
lX = labels[x_idx]
lY = labels[y_idx]
l_joint = labels[xy_idx]
return (X, lX), (Y, lY), (X_joint, l_joint), (Y_joint, l_joint)
else:
return X, Y, X_joint, Y_joint
class JointStratifiedMNIST(MNIST):
"""
MNIST data treated as two output variables consisting of the top halves and bottom halves of
each image. Sampling scheme is stratified across the paired and unpaired datasets.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples (remaining examples are split into one of top or bottom halves)
"""
super(JointStratifiedMNIST, self).__init__() # load data
self.n_paired = n_paired
self.split_point = int(784 / 2)
# joint and missing split
_n = len(self.Xtr)
self.x1_and_x2 = np.random.randint(_n, size=self.n_paired)
_remain = set(np.arange(_n)) - set(self.x1_and_x2)
_x_size = int(len(_remain) / 2)
self.x1_only = np.random.choice(list(_remain), size=_x_size, replace=False)
self.x2_only = np.array(list(_remain - set(self.x1_only)))
# separate the datasets
self.x1 = self.Xtr[self.x1_only, 0:self.split_point]
self.y1 = self.ytr[self.x1_only]
self.x2 = self.Xtr[self.x2_only, self.split_point:]
self.y2 = self.ytr[self.x2_only]
self.x12 = self.Xtr[self.x1_and_x2,:]
self.y12 = self.ytr[self.x1_and_x2]
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train',
binarize=True, include_labels=False):
# test set case
if dtype == 'test':
idx, (batch, y) = sample([self.Xte, self.yte], n_paired_samples)
if binarize:
batch = self._binarize(batch)
x1 = batch[:, 0:self.split_point]
x2 = batch[:, self.split_point:]
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (batch_p, y12) = sample([self.x12, self.y12], n_paired_samples)
_, (x1, y1) = sample([self.x1, self.y1], n_x1)
_, (x2, y2) = sample([self.x2, self.y2], n_x2)
if binarize:
batch_p = self._binarize(batch_p)
x1 = self._binarize(x1)
x2 = self._binarize(x2)
x1p = batch_p[:,0:self.split_point]
x2p = batch_p[:,self.split_point:]
if include_labels:
return (x1, y1), (x2, y2), (x1p, y12), (x2p, y12)
else:
return x1, x2, x1p, x2p
class ColouredMNIST(MNIST):
"""
Based on dataset created in the paper: "Unsupervised Image-to-Image Translation Networks"
X dataset consists of MNIST digits with strokes coloured as red, blue, green.
Y dataset consists of MNIST digits transformed to an edge map, and then coloured as orange, magenta, teal.
A small paired dataset consists of a one-to-one mapping between colours in X and colours in Y of the same
MNIST digit.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples to create
"""
super(ColouredMNIST, self).__init__() # load data
self.n_paired = n_paired
# colours for X and Y
self.x_colours = [(255, 0, 0), (0, 219, 0), (61, 18, 198)]
self.y_colours = [(255, 211, 0), (0, 191, 43), (0, 41, 191)]
# load from saved if exists
self._path = '../data/mnist_coloured.npz'
if os.path.isfile(self._path):
print("Loading data...", flush=True)
data = np.load(self._path)
self.M1 = data['arr_0']
self.M2 = data['arr_1']
self.M1_test = data['arr_2']
self.M2_test = data['arr_3']
print("Data loaded.", flush=True)
# create modalities if data doesn't exist
else:
self.M1, self.M2 = self._create_modalities(self.Xtr)
self.M1_test, self.M2_test = self._create_modalities(self.Xte)
print("Saving data...", flush=True)
np.savez(self._path, self.M1, self.M2, self.M1_test, self.M2_test)
print("Saved.", flush=True)
# separate indices
_n = len(self.Xtr)
self.x_and_y = set(np.random.randint(_n, size=self.n_paired))
_remain = set(np.arange(_n)) - set(self.x_and_y)
_x_size = int(len(_remain) / 2)
self.x_only = set(np.random.choice(list(_remain), size=_x_size, replace=False))
self.y_only = set(np.array(list(_remain - set(self.x_only))))
def sample(self, batch_size=100, dtype='train', include_labels=False):
"""
Sample minibatch.
"""
idx, (batch, labels) = self._sample(dtype, batch_size)
if dtype == 'test':
X = self.M1_test[idx]
Y = self.M2_test[idx]
X = np.reshape(X, newshape=[-1, 784 * 3])
Y = np.reshape(Y, newshape=[-1, 784 * 3])
if include_labels:
return (X, labels), (Y, labels)
else:
return X, Y
else:
# separate indices into paired and missing (for training set)
x_idx = np.array(list(set(idx) & self.x_only))
x_idx = np.array([np.argwhere(idx == x)[0, 0] for x in x_idx], dtype=np.int32)
y_idx = np.array(list(set(idx) & self.y_only))
y_idx = np.array([np.argwhere(idx == x)[0, 0] for x in y_idx], dtype=np.int32)
xy_idx = np.array(list(set(idx) & self.x_and_y))
xy_idx = np.array([np.argwhere(idx == x)[0, 0] for x in xy_idx], dtype=np.int32)
# create separate arrays for jointly observed and marginal data
X = self.M1[x_idx]
Y = self.M2[y_idx]
X_joint = self.M1[xy_idx]
Y_joint = self.M2[xy_idx]
# reshape
X = np.reshape(X, newshape=[-1, 784 * 3])
Y = np.reshape(Y, newshape=[-1, 784 * 3])
X_joint = np.reshape(X_joint, newshape=[-1, 784 * 3])
Y_joint = np.reshape(Y_joint, newshape=[-1, 784 * 3])
if include_labels: # split label data too
lX = labels[x_idx]
lY = labels[y_idx]
l_joint = labels[xy_idx]
return (X, lX), (Y, lY), (X_joint, l_joint), (Y_joint, l_joint)
else:
return X, Y, X_joint, Y_joint
def _create_modalities(self, data):
"""
Creates X and Y datasets from input MNIST data.
data: numpy array of MNIST digits, with dimensions: #digits x 784
"""
# randomly assign colours
x_bank, y_bank = self._sample_random_colours(len(data))
# colour digits
print("Colouring modalities...", flush=True)
X = self._colour(data, x_bank)
Y = self._colour(data, y_bank)
# reshape and scale
X = np.reshape(X, newshape=[-1, 28, 28, 3]) / 255
Y = np.reshape(Y, newshape=[-1, 28, 28, 3]) # normalized in _edge_map
# compute edge map
print("Computing edge map...", flush=True)
Y = self._edge_map(Y)
return X, Y
def _edge_map(self, data):
"""
Converts MNIST digits into corresponding edge map.
data: numpy array of MNIST digits, with dimensions: #images x height x width
"""
n = len(data)
edges = np.zeros(shape=data.shape)
for i in range(n):
im = data[i]
sx = ndimage.sobel(im, axis=0, mode='constant')
sy = ndimage.sobel(im, axis=1, mode='constant')
sob = np.hypot(sx, sy)
_max = np.max(sob)
edges[i] = sob / _max
return edges
def _colour(self, data, colours):
"""
Randomly colours MNIST digits into one of 3 colours.
data: numpy array of MNIST digits, with dimensions: #images x 784
colours: numpy array of colours, with dimensions: #images x 3
"""
rgb = []
for i in range(3):
rgb_comp = np.zeros(data.shape)
for j in range(len(data)):
ones = np.where(data[j] > 0)[0]
rgb_comp[j] = data[j]
rgb_comp[j, ones] = colours[j, i]
rgb.append(rgb_comp)
return np.stack(rgb, axis=-1)
def _sample_random_colours(self, n_samples):
"""
Draws random colours from each colour bank.
n_samples: number of random colours to draw
"""
x_bank = np.array(self.x_colours)
y_bank = np.array(self.y_colours)
idx = np.random.randint(len(x_bank), size=n_samples)
return x_bank[idx], y_bank[idx]
class ColouredStratifiedMNIST(ColouredMNIST):
"""
Based on dataset created in the paper: "Unsupervised Image-to-Image Translation Networks"
X dataset consists of MNIST digits with strokes coloured as red, blue, green.
Y dataset consists of MNIST digits transformed to an edge map, and then coloured as orange, magenta, teal.
A small paired dataset consists of a one-to-one mapping between colours in X and colours in Y of the same
MNIST digit.
"""
def __init__(self, n_paired, censor=False):
"""
n_paired: number of paired examples to create
"""
super(ColouredStratifiedMNIST, self).__init__(n_paired) # load data
self.x1_and_x2 = np.array(list(self.x_and_y))
self.x1_only = np.array(list(self.x_only))
self.x2_only = np.array(list(self.y_only))
# separate the datasets
self.x1 = self.M1[self.x1_only]
self.y1 = self.ytr[self.x1_only]
self.x2 = self.M2[self.x2_only]
self.y2 = self.ytr[self.x2_only]
self.x1p = self.M1[self.x1_and_x2]
self.x2p = self.M2[self.x1_and_x2]
self.yp = self.ytr[self.x1_and_x2]
if censor:
numbers_train = [0,1,2,3,4,5,6,7]
numbers_test = [8,9]
idx = []
for i, ix in enumerate(self.y1):
if ix in numbers_train:
idx.append(i)
self.y1 = self.y1[idx]
self.x1 = self.x1[idx]
idx = []
for i, ix in enumerate(self.y2):
if ix in numbers_train:
idx.append(i)
self.y2 = self.y2[idx]
self.x2 = self.x2[idx]
idx = []
for i, ix in enumerate(self.yp):
if ix in numbers_train:
idx.append(i)
self.yp = self.yp[idx]
self.x1p = self.x1p[idx]
self.x2p = self.x2p[idx]
idx = []
for i, ix in enumerate(self.yte):
if ix in numbers_test:
idx.append(i)
self.yte = self.yte[idx]
self.M1_test = self.M1_test[idx]
self.M2_test = self.M2_test[idx]
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train', include_labels=False):
# test set case
if dtype == 'test':
_, (x1, x2, y) = sample([self.M1_test, self.M2_test, self.yte], n_paired_samples)
# reshape
x1 = np.reshape(x1, newshape=[-1, 784 * 3])
x2 = np.reshape(x2, newshape=[-1, 784 * 3])
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (x1p, x2p, yp) = sample([self.x1p, self.x2p, self.yp], n_paired_samples)
_, (x1, y1) = sample([self.x1, self.y1], n_x1)
_, (x2, y2) = sample([self.x2, self.y2], n_x2)
# reshape
x1 = np.reshape(x1, newshape=[-1, 784 * 3])
x2 = np.reshape(x2, newshape=[-1, 784 * 3])
x1p = np.reshape(x1p, newshape=[-1, 784 * 3])
x2p = np.reshape(x2p, newshape=[-1, 784 * 3])
if include_labels:
return (x1, y1), (x2, y2), (x1p, yp), (x2p, yp)
else:
return x1, x2, x1p, x2p
class Sketches(object):
def __init__(self, n_paired):
_raw_photo_path = '../data/sketchy/256x256/photo/tx_000100000000/'
_raw_sketch_path = '../data/sketchy/256x256/sketch/tx_000100000000/'
_data_path = '../data/sketch.npz'
if os.path.isfile(_data_path): # load processed data
print("Loading data...", flush=True)
data = np.load(_data_path)
self.x1 = data['arr_0']
self.x2 = data['arr_1']
self.ytr = data['arr_2']
self.x1_test = data['arr_3']
self.x2_test = data['arr_4']
self.yte = data['arr_5']
print("Data loaded.", flush=True)
else: # process data and load
x1 = []
x2 = []
y = []
train = []
test = []
print("Processing data..", flush=True)
categories = [p for p in os.listdir(_raw_photo_path)
if os.path.isdir(os.path.join(_raw_photo_path, p))]
i = 0
for cat in categories:
print("At category: ", cat, flush=True)
cat_photo_path = _raw_photo_path + cat + '/'
cat_sketch_path = _raw_sketch_path + cat + '/'
photo_files = [p for p in os.listdir(cat_photo_path)
if os.path.isfile(os.path.join(cat_photo_path, p))]
sketch_files = [p for p in os.listdir(cat_sketch_path)
if os.path.isfile(os.path.join(cat_sketch_path, p))]
for f in photo_files:
photo_path = cat_photo_path + f
photo = ndimage.imread(photo_path)
photo = imresize(photo, size=0.25, interp='cubic')
photo = np.reshape(photo, newshape=[1, -1])
sketches = [p for p in sketch_files if f.replace('.jpg','')+'-' in p]
is_train = np.random.binomial(n=1, p=0.85) # sort into train/test sets
for sk in sketches:
sketch_path = cat_sketch_path + sk
sketch = ndimage.imread(sketch_path)
sketch = imresize(sketch, size=0.25, interp='cubic')
sketch = np.reshape(sketch, newshape=[1, -1])
x1.append(photo)
x2.append(sketch)
y.append(cat)
if is_train == 1:
train.append(i)
else:
test.append(i)
i += 1
y = pd.Series(y)
y = | pd.Categorical(y) | pandas.Categorical |
def test_get_number_rows_cols_for_fig():
from mspypeline.helpers import get_number_rows_cols_for_fig
assert get_number_rows_cols_for_fig([1, 1, 1, 1]) == (2, 2)
assert get_number_rows_cols_for_fig(4) == (2, 2)
def test_fill_dict():
from mspypeline.helpers import fill_dict
def test_default_to_regular():
from mspypeline.helpers import default_to_regular
from collections import defaultdict
d = defaultdict(int)
d["a"] += 1
assert isinstance(d, defaultdict)
d = default_to_regular(d)
assert isinstance(d, dict)
assert not isinstance(d, defaultdict)
def test_get_analysis_design():
from mspypeline.helpers import get_analysis_design
assert get_analysis_design(["A1_1", "A1_2", "A2_1", "A2_2"]) == {
'A1': {'1': 'A1_1', '2': 'A1_2'},
'A2': {'1': 'A2_1', '2': 'A2_2'}
}
assert get_analysis_design(["A_1_1"]) == {"A": {"1": {"1": "A_1_1"}}}
def test_plot_annotate_line():
from mspypeline.helpers import plot_annotate_line
def test_venn_names():
from mspypeline.helpers import venn_names
def test_install_r_dependencies():
from mspypeline.helpers.Utils import install_r_dependencies
def test_get_number_of_non_na_values():
from mspypeline.helpers import get_number_of_non_na_values as gna
assert gna(20) > gna(10) > gna(5) > gna(3)
assert gna(3) == gna(2) and gna(3) == gna(1)
def test_get_intersection_and_unique():
from mspypeline.helpers import get_intersection_and_unique
import pandas as pd
df1 = pd.DataFrame()
df2 = pd.DataFrame()
assert all(map(pd.Series.equals,
get_intersection_and_unique(df1, df2),
(pd.Series([], dtype=bool), pd.Series([], dtype=bool), pd.Series([], dtype=bool))))
df1 = pd.DataFrame([[1, 1, 1], [1, 1, 1], [0, 0, 0], [1, 0, 0]])
df2 = pd.DataFrame([[1, 1, 1], [0, 0, 0], [1, 1, 1], [1, 0, 0]])
assert all(map(
pd.Series.equals,
get_intersection_and_unique(df1, df2),
( | pd.Series([1, 0, 0, 0], dtype=bool) | pandas.Series |
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import numpy as np
import re
import pathlib
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
def tuneHyperParam(cell_type, X_train, y_train):
# define models and parameters
model = RandomForestClassifier()
n_estimators = [10, 50, 100, 500, 1000]
max_features = ['sqrt', 'log2']
# define grid search
grid = dict(n_estimators=n_estimators,max_features=max_features)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=5, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = grid_search.fit(X_train, y_train)
# Save Grid Search Results for Plotting later
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
X = pd.concat([ | pd.DataFrame(grid_result.cv_results_["params"]) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# import tensorflow as tf
# from tensorflow.keras import layers, optimizers
from matplotlib.pyplot import MultipleLocator
import os
from collections import defaultdict
# import __main__
# __main__.pymol_argv = ['pymol', '-qc']
# import pymol as pm
import seaborn as sns
# from scipy import stats
np.set_printoptions(suppress=True) # suppress scientific notation when printing arrays
np.set_printoptions(threshold=np.inf)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # macOS needs this set to true
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# DIY acc
"""def Myaccc(y_true, y_pred):
y_true = tf.cast(y_true, dtype=tf.int32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_pred, axis=1, output_type=tf.int32),
tf.argmax(y_true, axis=1, output_type=tf.int32)), tf.float32)) # Number of rows saved
return accuracy
"""
"""class Myacc(tf.keras.metrics.Metric):
def __init__(self):
super().__init__()
self.total = self.add_weight(name='total', dtype=tf.int32, initializer=tf.zeros_initializer())
self.count = self.add_weight(name='count', dtype=tf.int32, initializer=tf.zeros_initializer())
def update_state(self, y_true, y_pred, sample_weight=None):
values = tf.cast(tf.equal(tf.argmax(y_true, axis=1, output_type=tf.int32),
tf.argmax(y_pred, axis=1, output_type=tf.int32)), tf.int32)
self.total.assign_add(tf.shape(y_true)[0])
self.count.assign_add(tf.reduce_sum(values))
def result(self):
return self.count / self.total
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.total.assign(0)
self.count.assign(0)
class MyCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
if logs.get("val_myacc") > 0.95 and logs.get("loss") < 0.1:
print("\n meet requirements so cancelling training!")
self.model.stop_training = True
"""
"""def plotNNout(self):
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('probability', fontsize=20)
# ax1.set_xlabel('0', fontsize=20)
ax1.set_ylabel('probability', fontsize=20)
ax1.set_ylabel('frame', fontsize=20)
for i in range(1,8):
path = './models/{0}'.format(i)
model = tf.saved_model.load(path)
# data_x = np.load('./iptg_nobind.npy', allow_pickle=True)
data_x = np.load('./iptg_nobind.npy', allow_pickle=True)[500:]
# print(data_x.shape)
data_x = self.norm(data_x)
data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
out = model(data_x)
print(out)
ax1.plot(range(4500), out[:,1])
# ax1.plot([0, 1], [0, 1], color='black')
plt.show()
def protran(self):
result=[]
for i in range(1,21):
path = './models/{0}'.format(i)
model = tf.saved_model.load(path)
data_x = np.load('./iptg_nobind.npy', allow_pickle=True)
data_x = self.norm(data_x)
data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
out = model(data_x)
mean_model = tf.reduce_mean(out[:,0])
result.append(mean_model)
print(mean_model)
print(result)
print("total_mean:", np.mean(result))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('process', fontsize=20)
ax1.set_xlabel('frame', fontsize=20)
ax1.set_ylabel('probability to Nobind', fontsize=20)
ax1.plot(range(5000),out[:,0])
plt.show()
def train(self,
# i
):
for i in range(7, 8): # Batch training of neural networks
path = self.ANN + "twostates_train.npy" # Read training data
train_x = np.load(path, allow_pickle=True)
test_x = np.load('./iptg_nobind.npy', allow_pickle=True) # Read test data,5000
train_y = np.zeros(shape=(train_x.shape[0])) # Set label,9000
train_y[:4500] = 1
test_y = np.zeros(shape=(test_x.shape[0])) # 5000
# print(train_x.shape, test_x.shape)
dataset_x = np.concatenate((train_x, test_x), axis=0) # Combine training set and test set,14000
# print(dataset_x.shape)
dataset_x = self.norm(dataset_x)
dataset_y = np.concatenate((train_y, test_y)) # Merge tags,14000
# train
dataset_x = tf.convert_to_tensor(dataset_x, dtype=tf.float32)
dataset_y = tf.convert_to_tensor(dataset_y, dtype=tf.int32)
dataset_y_onehot = tf.one_hot(dataset_y, depth=2, dtype=tf.int32)
model = tf.keras.Sequential([
layers.Dense(256, activation=tf.nn.tanh),
layers.Dense(128, activation=tf.nn.tanh),
layers.Dense(64, activation=tf.nn.tanh),
layers.Dense(32, activation=tf.nn.tanh),
layers.Dense(16, activation=tf.nn.tanh),
layers.Dense(8, activation=tf.nn.tanh),
layers.Dense(4, activation=tf.nn.tanh),
layers.Dense(2, activation=tf.nn.softmax)
])
callbacks = MyCallback()
model.compile(optimizer=optimizers.Adam(learning_rate=0.00001),
loss=tf.losses.binary_crossentropy,
metrics=[
Myacc()
])
models_path = './models/' #
logs_dir = './logs/{0}/'.format(i)
logs_train_dir = os.path.join(logs_dir, "train")
logs_valid_dir = os.path.join(logs_dir, "valid")
for dir_name in [logs_dir, logs_train_dir, logs_valid_dir, models_path]:
if not os.path.exists(dir_name):
os.mkdir(dir_name)
summary_writer = tf.summary.create_file_writer(logs_train_dir)
model.fit(
dataset_x,
dataset_y_onehot,
epochs=10000,
shuffle=True,
batch_size=100,
validation_split=5 / 14,
# validation_data=(dataset_x[9000:], dataset_y[9000:]),
callbacks=[callbacks]
)
tf.saved_model.save(model, models_path+'{0}'.format(i))
def testmodels(self):
model1 = tf.saved_model.load("./modelsset2/18")
# model2 = tf.saved_model.load("./models/2")
# data_x = np.load('./iptg_nobind.npy', allow_pickle=True)
data_x = np.load('./Bind.npy', allow_pickle=True)[500:]
data_x = self.norm(data_x)
data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
# label = np.zeros(shape=(data_x.shape[0]))
# label = tf.convert_to_tensor(label, dtype=tf.int32) #
out1 = model1(data_x)
# print(out)
# out2 = model2(data_x)
pro1 = out1[:,1]
# pro2 = out2[:, 0]
# print(pro1[3754])
# print(pro2[3754])
print(pro1)
print(np.where(pro1==np.min(pro1)))"""
class RAF:
def __init__(self):
self.contact_dis = 4.5 # contact distance between heavy atoms
self.startFrame = 1 # first frame
self.endFrame = 5000 + 1 # last frame
# self.set_name = 7
self.aa = ["GLY", "ALA", "VAL", "LEU", "ILE", "PHE", "TRP", "TYR", "ASP", "ASN",
"GLU", "LYS", "GLN", "MET", "SER", "THR", "CYS", "PRO", "HIS", "ARG",
"HID", "ASN", "ASH", "HIE", "HIP"]
self.data_name = ""
# self.csv_path = ""
# self.frame_path = ""
self.ANN = ""
# self.output = ""
# self.startSet = 1
# self.endSet = 10 + 1
self.Interval = 1 # frame interval
self.frame_name = "md{0}.pdb" # name of every frame
self.csv_name = "{0}.csv" # name of every csv
self.vmd_rmsd_path = "/Users/erik/Desktop/MD_WWN/test_100ns/"
self.RAF_backbone_mass = [14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01]
self.sasa_max = {"GLY": 87.6,
"ALA": 111.9,
"VAL": 154.4,
"LEU": 183.1,
"ILE": 179.1,
"PHE": 206.4,
"TRP": 239.0,
"TYR": 224.6,
"ASP": 169.4,
"ASN": 172.5,
"GLU": 206.0,
"LYS": 212.5,
"GLN": 204.4,
"MET": 204.3,
"SER": 132.9,
"THR": 154.1,
"CYS": 139.3,
"PRO": 148.898, # No hydrogen bonds found, so this figure is calculated by pymol
"HIS": 188.5,
"ARG": 249.0
} # Angstroms^2, using 5-aa stride
# self.hydrophobic_index = [[16, 20, 34, 38, 50, 53], [15, 16, 18, 20]]
# self.hydrophobic_index = [[16, 20, 34, 38, 50, 53], [82, 83, 85, 87]]
self.hydrophobic_index = [16, 20, 34, 38, 50, 53, 82, 83, 85, 87] # sasa_statistics
self.either_index = [37, 41, 45]
self.hydrophilic_index = [36]
self.stride = {"C": "Coil", "T": "Turn", "B": "Bridge", "b": "Bridge", "E": "Strand", "I": "PI_helix",
"G": "310Helix", "H": "AlphaHelix"}
def processIon(self, aa): # Dealing with protonation conditions
if aa in ['ASH']:
return 'ASP'
if aa in ['HIE', 'HID', 'HIP']:
return 'HIS'
return aa
def norm(self, data): # note: standardizing by the variance might be preferable to min-max
# min-max
min_val = np.min(data)
max_val = np.max(data)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
data[i][j] = (data[i][j] - min_val) / (max_val - min_val)
return data
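# Equivalent vectorized form of the loop above (illustrative, not in the original code):
# normalized = (data - np.min(data)) / (np.max(data) - np.min(data))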
def thin_data(self, li, fold=20): # randomly keep roughly 1/fold of the items
y = []
for i in li:
t = np.random.uniform(low=0, high=1)
if t < 1.0 / fold:
y.append(i)
return y
def space_data(self, li, interval=20): # keep every interval-th item
y = []
count = 0
for i in li:
if count % interval == 0:
y.append(i)
count %= interval
count += 1
return y
def readHeavyAtom_singleChain(self, path) -> np.array:
# To read the coordinates of the heavy atoms of each chain, the chainID information is required
"""[[-21.368 108.599 3.145]
[-19.74 109.906 6.386]
[-19.151 113.618 6.922]
[-16.405 114.786 4.541]
...
[ 8.717 80.336 46.425]
[ 7.828 76.961 48.018]
[ 8.38 74.326 45.331]
[ 12.103 74.061 46.05 ]]"""
print("Reading:", path)
print("Better check out the last column in the input file!")
atom_cor = []
atom_nam = []
with open(path, 'r') as f:
for i in f.readlines():
record = i.strip()
atom = record[:4].strip()
if atom != "ATOM": # Detect ATOM start line
continue
# print(record)
serial = record[6:11].strip() # 697
atname = record[12:16].strip() # CA
resName = self.processIon(record[17:20].strip()) # PRO, Treated protonation conditions
if resName not in self.aa:
continue
resSeq = record[22:26].strip() # 3
cor_x = record[30:38].strip() # Å
cor_y = record[38:46].strip()
cor_z = record[46:54].strip()
element = record[13].strip() # C
xyz = [float(cor_x), float(cor_y), float(cor_z)]
# eg: 2-LYS-N-697
name = resSeq + "-" + resName + "-" + atname + "-" + serial
if element != "H":
atom_cor.append(xyz)
atom_nam.append(name)
return np.array(atom_cor), atom_nam
def euclidean(self, a_matrix, b_matrix):
# Using matrix operation to calculate Euclidean distance
""" b
[[2.23606798 1. 5.47722558]
a [7.07106781 4.69041576 3. ]
[12.20655562 9.8488578 6.4807407 ]]"""
d1 = -2 * np.dot(a_matrix, b_matrix.T)
d2 = np.sum(np.square(a_matrix), axis=1, keepdims=True)
d3 = np.sum(np.square(b_matrix), axis=1)
dist = np.sqrt(d1 + d2 + d3)
return dist
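# Note (added): the identity used above is ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i.b_j + ||b_j||^2,
# so d1 + d2 + d3 is the full matrix of squared distances before the square root.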
def dihedral(self, p1, p2, p3, p4): # Calculate a single dihedral angle
p12 = p2 - p1
p13 = p3 - p1
p42 = p2 - p4
p43 = p3 - p4
nv1 = np.cross(p13, p12)
nv2 = np.cross(p43, p42)
if nv2.dot(p12) >= 0:
signature = -1
else:
signature = 1
result = (np.arccos(np.dot(nv1, nv2) / (np.linalg.norm(nv1) * np.linalg.norm(nv2))) / np.pi) * 180 * signature
return result
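# Worked example (added for illustration): for the planar points p1=(0,0,1), p2=(0,0,0),
# p3=(1,0,0), p4=(1,1,0) the two planes are perpendicular, so the magnitude is 90 degrees;
# with the sign convention above (nv2 . p12 >= 0 gives -1) the call should return -90.0.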
def dihedral_atom_order(self, atom_nam):
n = 0
atoms = ["N", "CA", "C"]
for i in atom_nam:
if i.split("-")[2] in atoms:
if atoms[n % 3] != i.split("-")[2]:
raise Exception("The order of dihedral atoms is wrong")
n += 1
def PCA_dis(self):
bind = np.load('./Bind.npy', allow_pickle=True)
bind = bind[500:]
nobind = np.load('./Nobind.npy', allow_pickle=True)
nobind = nobind[500:]
print(bind.shape)
# print(bind.shape)
# print(nobind.shape)
meanVal = np.mean(bind, axis=0) # Find the mean value by column, that is, find the mean value of each feature
newData = bind - meanVal
# print(meanVal.shape)
covMat = np.cov(newData, rowvar=False)
eigVals, eigVects = np.linalg.eig(np.mat(covMat))
print(eigVals)
eigValIndice = np.argsort(
eigVals) # Sort the eigenvalues from small to large, and the return value is the index
# print(eigValIndice)
n_eigValIndice = eigValIndice[-1:-(30 + 1):-1] # The subscript of the largest n eigenvalues
n_eigVect = eigVects[:, n_eigValIndice] # The eigenvectors corresponding to the largest n eigenvalues
lowDDataMat = newData * n_eigVect # Low-dimensional feature space data
# print(lowDDataMat.shape)
reconMat = (lowDDataMat * n_eigVect.T) + meanVal # Restructure the data
# print(reconMat)
# print(covMat)
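# The block above is a hand-rolled PCA (center, covariance, eigendecomposition, project, reconstruct).
# An equivalent sketch with scikit-learn, shown only for comparison and not used by the original code:
# from sklearn.decomposition import PCA
# pca = PCA(n_components=30)
# lowD = pca.fit_transform(bind) # low-dimensional scores, analogous to lowDDataMat
# recon = pca.inverse_transform(lowD) # reconstruction, analogous to reconMat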
def PCA_dih(self):
result_A_psi_bind = np.squeeze(np.load(self.csv_path + 'result_A_psi_bind.npy', allow_pickle=True))
result_B_psi_bind = np.squeeze(np.load(self.csv_path + 'result_B_psi_bind.npy', allow_pickle=True))
result_A_phi_bind = np.squeeze(np.load(self.csv_path + 'result_A_phi_bind.npy', allow_pickle=True))
result_B_phi_bind = np.squeeze(np.load(self.csv_path + 'result_B_phi_bind.npy', allow_pickle=True))
result_A_psi_nobind = np.squeeze(np.load(self.csv_path + 'result_A_psi_nobind.npy', allow_pickle=True))
result_B_psi_nobind = np.squeeze(np.load(self.csv_path + 'result_B_psi_nobind.npy', allow_pickle=True))
result_A_phi_nobind = np.squeeze(np.load(self.csv_path + 'result_A_phi_nobind.npy', allow_pickle=True))
result_B_phi_nobind = np.squeeze(np.load(self.csv_path + 'result_B_phi_nobind.npy', allow_pickle=True))
# 数据组织形式:(result_A_phi_bind, result_A_psi_bind, result_B_phi_bind, result_B_psi_bind)
nobind = np.hstack((result_A_phi_nobind, result_A_psi_nobind, result_B_phi_nobind, result_B_psi_nobind))
meanVal = np.mean(nobind, axis=0) # Find the mean value by column, that is, find the mean value of each feature
newData = nobind - meanVal
covMat = np.cov(newData, rowvar=False)
eigVals, eigVects = np.linalg.eig(np.mat(covMat))
eigValIndice = np.argsort(
eigVals) # Sort the eigenvalues from small to large, and the return value is the index
n_eigValIndice = eigValIndice[-1:-(30 + 1):-1] # The subscript of the largest n eigenvalues
n_eigVect = eigVects[:, n_eigValIndice] # The eigenvectors corresponding to the largest n eigenvalues
first_vect = np.abs(n_eigVect[:, 0])
first_val = eigVals[n_eigValIndice][0]
print(np.where(first_vect > 0.07)[0] % 267)
def rmsd_plot_gmx(self): # unit is Å
path = "/Users/erik/Desktop/RAF/crystal_WT/test/1/"
filename = "rmsd.xvg"
frame = 0
rms = []
with open(path + filename) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0: # a blank line marks the end of the file, so stop reading
break
if record[0] not in ["#", "@"]:
li = record.split()
rms.append(float(li[1]) * 10) # Å
frame += 1
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure', fontsize=20)
ax1.set_xlabel('frame', fontsize=20)
ax1.set_ylabel("RMSD(Å)", fontsize=20)
ax1.scatter(range(frame), rms, s=.8)
plt.show()
# np.save(path+"rmsd.npy", np.array(rms))
def gyrate_plot_gmx(self): # unit is Å
# crystal_WT
# dRafX6
num = 5
WD = "/Users/erik/Desktop/RAF"
group = "crystal_WT"
temperatures = ["300K", "344K", "384K"]
interval = 0.02 # ns
for temperature in temperatures:
fig = plt.figure(num=1, figsize=(15, 8), dpi=200)
dir_name = "/".join((WD, group, temperature, "gyrate"))
for k in range(1, num + 1):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
path = "/".join((WD, group, temperature, str(k), "gyrate.xvg"))
gyrate = self.read_gyrate_gmx(path)
average_gyrate = np.mean(gyrate)
# print(len(rms))
ax1 = fig.add_subplot(2, 3, k)
ax1.cla()
ax1.set_title(group + '_' + temperature, fontsize=20)
ax1.set_xlabel('time(ns)', fontsize=2)
ax1.set_ylabel("gyrate(Å)", fontsize=10)
ax1.scatter(np.array(range(len(gyrate))) * interval, gyrate, s=.1)
ax1.plot([0, 500], [average_gyrate, average_gyrate], color="red")
# print(np.mean(rms))
plt.savefig(dir_name + "/gyrate_5.png")
# plt.legend()
# plt.show()
def rmsf_plot(self): # unit is Å
file_name = 'rmsf_CA'
target_file = "/Users/erik/Desktop/RAF/crystal_WT/test/1/" + file_name + ".xvg"
x = [[], []]
y = [[], []]
chain_id = -1
with open(target_file) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0: # a blank line marks the end of the file, so stop reading
break
if record[0] not in ["#", "@"]:
li = record.split()
id = float(li[0])
if id == 1:
chain_id += 1
x[chain_id].append(int(id)) # -55
y[chain_id].append(float(li[1]) * 10)
# np.save(self.vmd_rmsd_path + file_name + ".npy", np.array(y))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure', fontsize=20)
ax1.set_xlabel('residue', fontsize=20)
ax1.set_ylabel("RMSF(Å)", fontsize=20)
ax1.plot(x[0], y[0]) # 4.1G
ax1.scatter(x[0], y[0], s=20, color="green") # 4.1G
ax1.plot(x[1], y[1]) # NuMA
print(x[0], x[1])
print(y[0], y[1])
# hydrophobic_index
# ax1.scatter(self.hydrophobic_index[0], [y[0][i - 1] for i in self.hydrophobic_index[0]], color="black")
# ax1.scatter(self.hydrophobic_index[1], [y[1][i - 1] for i in self.hydrophobic_index[1]], color="black")
# hydrophilic_index
# ax1.scatter(self.hydrophilic_index[0], y[0][self.hydrophilic_index[0] - 1], color="red")
# either
# ax1.scatter(self.either_index, [y[0][i - 1] for i in self.either_index], color="green")
plt.show()
def rmsf_plot_RAF(self): # unit is Å
file_name = 'rmsf_CA'
num = 5
result_crystal_WT = []
result_cry_repacking = []
temperature = "384K"
Strand = [[4, 5, 6, 7, 8], [15, 16, 17, 18], [43, 44, 45, 46, 47], [72, 73, 74, 75, 76, 77]]
Alphahelix = [[25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36], [65, 66, 67, 68]]
for i in range(2, num + 1):
rmsf = []
target_file = "/Users/erik/Desktop/RAF/crystal_WT/{1}/{0}/".format(i, temperature) + file_name + ".xvg"
with open(target_file) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0: # a blank line marks the end of the file, so stop reading
break
if record[0] not in ["#", "@"]:
li = record.split()
rmsf.append(float(li[1]) * 10)
f.close()
result_crystal_WT.append(rmsf)
for k in range(1, num + 1):
rmsf = []
target_file = "/Users/erik/Desktop/RAF/cry_repacking/{1}/{0}/".format(k, temperature) + file_name + ".xvg"
with open(target_file) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0: # a blank line marks the end of the file, so stop reading
break
if record[0] not in ["#", "@"]:
li = record.split()
rmsf.append(float(li[1]) * 10)
f.close()
result_cry_repacking.append(rmsf)
result_crystal_WT = np.mean(np.array(result_crystal_WT), axis=0)
result_cry_repacking = np.mean(np.array(result_cry_repacking), axis=0)
print("crystal_WT_rmsf_mean:", np.mean(result_crystal_WT))
print("cry_repacking_rmsf_mean:", np.mean(result_cry_repacking))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure', fontsize=20)
ax1.set_xlabel('residue_CA', fontsize=20)
ax1.set_ylabel("RMSF(Å)", fontsize=20)
ax1.plot(range(1, len(result_crystal_WT) + 1), result_crystal_WT, color="blue")
ax1.scatter(range(1, len(result_crystal_WT) + 1), result_crystal_WT, s=20, color="blue", marker="o")
ax1.plot(range(1, len(result_cry_repacking) + 1), result_cry_repacking, color="red")
ax1.scatter(range(1, len(result_cry_repacking) + 1), result_cry_repacking, s=20, color="red", marker="^")
# strand
for strand in Strand:
ax1.plot(strand, [0] * len(strand), color="black")
# alpha
for alpha in Alphahelix:
ax1.plot(alpha, [0] * len(alpha), color="black")
plt.show()
def sasa_sf(self, path): # Calculate a single sasa
result = []
score = 110
with open(path, 'r') as f:
for i in f.readlines():
record = i.strip()
if record[0:3] == 'ASG':
aa_name = record[5:8]
result.append(self.relative_SASA(aa_name, float(record[64:69])))
hydrophobic_index = [15, 19, 33, 37, 49, 52, 81, 82, 84, 86]
hydrophobic_threshold = 0.36
either_index = [36, 40, 44]
hydrophilic_index = [35]
hydrophilic_threshold = 0.36
for k in hydrophobic_index:
if result[k] > hydrophobic_threshold:
print(k)
score -= 10
for j in hydrophilic_index:
if result[j] <= hydrophilic_threshold:
print(j)
score -= 10
# return result
return score
def sasa(self):
"""path_dir = "/Users/erik/Desktop/MD_WWN/REMD/SASA/"
num = 16
series = dict()
for i in range(1,num+1):
for j in range(1, 2001):
path = path_dir + "{0}/sasa_md.pdb.{1}".format(i, j)
if str(i) not in series.keys():
series[str(i)] = [self.sasa_sf(path)]
else:
series[str(i)].append(self.sasa_sf(path))
np.save("./REMD_16_SASA.npy", series)"""
num = 16
series = np.load("./REMD_16_SASA.npy", allow_pickle=True).item()
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
for k in range(1, num + 1):
ax1 = fig.add_subplot(4, 4, k) # Fill in the top left corner first, from left to right
ax1.set_title('score of REMD', fontsize=2)
ax1.set_xlabel('frames(5ps/f)', fontsize=2)
ax1.set_ylabel("score", fontsize=2)
ax1.scatter(range(2000), series[str(k)], s=.1)
plt.show()
return np.array(series)
def relative_SASA(self, aa_name, SASA):
return SASA / self.sasa_max[self.processIon(aa_name)]
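# Example (added): relative_SASA('GLY', 43.8) = 43.8 / 87.6 = 0.5, i.e. the glycine is
# half as exposed as its tabulated maximum.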
def sasa_cluster(self):
result = []
num = 15
for i in range(1, num + 1):
result.append(self.sasa_sf("/Users/erik/Desktop/MD_WWN/SASA/sasa_cluster_{0}.pdb".format(i)))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('clusters in paper', fontsize=20)
ax1.set_xlabel('cluster_serial', fontsize=20)
ax1.set_ylabel("score", fontsize=20)
ax1.scatter([i for i in range(1, num + 1)], result) # ###############!!!!!!!
# plt.savefig('sasa.png')
plt.show()
def relative_sasa_statistics(self, aa):
result = []
for k in range(1, 5000):
path = "/Users/erik/Desktop/MD_WWN/test_100ns/SASA/sasa_md.pdb." + str(k)
with open(path, 'r') as f:
for i in f.readlines():
record = i.strip()
if record[0:3] == 'ASG' and record[5:8] == aa:
aa_name = record[5:8]
result.append(self.relative_SASA(aa_name, float(record[64:69])))
plt.hist(result, bins=100, facecolor="blue", edgecolor="black", alpha=0.7)
plt.show()
def rmsd_between(self, path1, path2, part):
os.system("echo 4 {2} | gmx rms -s {0} -f {1} -o rmsd_log.xvg -mw yes".format(path1, path2, part))
with open("./rmsd_log.xvg", 'r') as file:
for i in file.readlines():
record = i.strip()
if record[0] not in ["#", "@"]:
record = record.split()
rmsd = float(record[-1]) * 10. # Å
file.close()
return rmsd
def add_chainID(self, file_path):
chain_ID = ["A", "B"]
n = -1
current_aa = ""
with open(file_path, 'r') as f:
for i in f.readlines():
record = i.strip()
atom = record[:4].strip()
if atom == "TEM":
break
if atom != "ATOM": # Detect ATOM start line
continue
resName = self.processIon(record[17:20].strip()) # PRO, Treated protonation conditions
resSeq = int(record[22:26].strip())
if resSeq == 1 and current_aa != resName:  # a new chain starts at residue 1 (or 62, depending on which system's numbering is used)
n += 1
record = record[:21] + chain_ID[n] + record[22:]
current_aa = resName
print(record)
def read_rmsd_gmx(self, path):
# path = "/Users/erik/Desktop/Pareto/reverse/4th/"
# filename = "rmsd.xvg"
frame = 0
rms = []
with open(path) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0:  # a blank line marks the end of the data; stop reading
break
if record[0] not in ["#", "@"]:
li = record.split()
rms.append(float(li[1]) * 10) # Å
frame += 1
return rms
def rmsf_plot_amber(self):
path = self.vmd_rmsd_path + "rmsf_stage_2.data"
Res_41G = []
Res_numa = []
AtomicFlx_41G = []
AtomicFlx_numa = []
Res = [Res_41G, Res_numa]
AtomicFlx = [AtomicFlx_41G, AtomicFlx_numa]
chainID = 0
with open(path, 'r') as f:
for i in f.readlines():
record = i.strip()
if record[0] != '#':
record = record.split()
resid = int(float(record[0]))
if resid == 68:
chainID += 1
Res[chainID].append(resid)
AtomicFlx[chainID].append(float(record[1]))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('RMSF', fontsize=20)
ax1.set_xlabel('Resid', fontsize=20)
ax1.set_ylabel("rmsf(Å)", fontsize=20)
ax1.plot(Res[0], AtomicFlx[0]) # ###############!!!!!!!
ax1.plot(Res[1], AtomicFlx[1])
# hydrophobic_index
ax1.scatter(self.hydrophobic_index[0], [AtomicFlx[0][i - 1] for i in self.hydrophobic_index[0]], color="black")
ax1.scatter(self.hydrophobic_index[1], [AtomicFlx[1][i - 1 - 67] for i in self.hydrophobic_index[1]],
color="black")
# hydrophilic_index
ax1.scatter(self.hydrophilic_index[0], AtomicFlx[0][self.hydrophilic_index[0] - 1], color="red")
# either
ax1.scatter(self.either_index, [AtomicFlx[0][i - 1] for i in self.either_index], color="green")
# plt.savefig('sasa.png')
plt.show()
def rmsd_plot_amber(self):
path = "/Users/erik/Desktop/MD_WWN/REMD/new_topo/"
filename = "310K.data"
frame = 0
rms = []
with open(path + filename) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0:  # a blank line marks the end of the data; stop reading
break
if record[0] not in ["#", "@"]:
li = record.split()
rms.append(float(li[1]))  # already in Å, no unit conversion needed
frame += 1
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure', fontsize=20)
ax1.set_xlabel('frame', fontsize=20)
ax1.set_ylabel("RMSD(Å)", fontsize=20)
ax1.scatter(range(frame), rms, s=.1)
plt.show()
def REMD_temperature_generation(self, start, end, replicas):
if os.path.exists("./temperatures.dat"):
os.system("rm temperatures.dat")
T = []
k = np.log(np.power(end / start, 1 / (replicas - 1)))  # log of the geometric ratio between adjacent temperatures
print(k)
with open("temperatures.dat", 'a') as f:
for i in range(replicas):
T.append(start * np.exp(k * i))
f.write("%.1f" % float(
start * np.exp(k * i))) # Keep one decimal place, and ensure the temperature exponential interval
f.write("\n")
return T
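# Worked example (sketch, not taken from the original code): the ladder follows
#   T_i = start * (end / start) ** (i / (replicas - 1)), i = 0 .. replicas - 1,
# so start=270 K, end=570 K and replicas=8 give approximately
# 270.0, 300.4, 334.3, 371.9, 413.8, 460.4, 512.3, 570.0 K.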
def sasa_statistics(self):
"""data = self.sasa()
np.save("./sasa.npy", data)"""
data = np.load("./sasa.npy", allow_pickle=True)
# print(data.shape) # 50000,93
data_mean = np.mean(data, axis=0)
# print(data_mean.shape) # 93,
data_var = np.std(data, axis=0)
# print(data_var.shape)
# print(data_mean[15])
# print(data_var[15])
hydropho_mean = [data_mean[i - 1] for i in self.hydrophobic_index]
hydropho_var = [data_var[i - 1] for i in self.hydrophobic_index]
hydrophi_mean = [data_mean[i - 1] for i in self.hydrophilic_index]
hydrophi_var = [data_var[i - 1] for i in self.hydrophilic_index]
either_mean = [data_mean[i - 1] for i in self.either_index]
either_var = [data_var[i - 1] for i in self.either_index]
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('SASA', fontsize=20)
ax1.set_xlabel('Resid', fontsize=20)
ax1.set_ylabel("relative_SASA", fontsize=20)
ax1.errorbar([i + 1 for i in range(data.shape[1])], data_mean, yerr=data_var, fmt="o")
ax1.errorbar(self.hydrophobic_index, hydropho_mean, yerr=hydropho_var, fmt="o", color="black")
ax1.errorbar(self.hydrophilic_index, hydrophi_mean, yerr=hydrophi_var, fmt="o", color="red")
ax1.errorbar(self.either_index, either_mean, yerr=either_var, fmt="o", color="green")
plt.show()
def aaNumSASA(self):
data = np.load("./sasa.npy", allow_pickle=True)
data_mean = np.mean(data, axis=0)
# print(data_mean.shape)
a = 0 # <=0.36
b = 0 # between 0.36 and 0.6
c = 0  # >= 0.6
for i in data_mean:
if i <= 0.36:
a += 1
elif i >= 0.6:
c += 1
else:
b += 1
print(a, b, c)
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('SASA', fontsize=20)
ax1.set_xlabel('Resid', fontsize=20)
ax1.set_ylabel("relative_SASA", fontsize=20)
ax1.bar(["<=0.36", "0.36<sasa<0.6", ">=0.6"], [a, b, c])
plt.show()
"""def time_series_T(self):
path = "/Users/erik/Downloads/remd_output/"
num = 8
temperatures = self.REMD_temperature_generation(269.5, 570.9, num)
series = dict()
current_replica = 1
with open(path+"rem.log") as f:
for j in f.readlines():
record = j.strip().split()
if record[0] != "#":
if float(record[-4]) == 317.50:
print(record[0])
if record[-4] not in series.keys():
series[record[-4]] = [current_replica]
else:
series[record[-4]].append(current_replica)
current_replica = (current_replica+1) % num
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('time series of replica exchange', fontsize=20)
ax1.set_xlabel('time(ps)', fontsize=20)
ax1.set_ylabel("Replica", fontsize=20)
ax1.scatter([k for k in range(1,1001)], series["%.2f" % temperatures[0]], s=.5)
plt.show()"""
def crdidx(self): # All channels experienced by a temperature
start = 1
end = 1
num = 8 # num of temperatures
exchanges = 100000 * (end - start + 1)
series = dict()
for p in range(1, num + 1):
series[str(p)] = []
for serial in range(start, end + 1):
path = "/Users/erik/Desktop/MD_WWN/REMD/new_topo/crdidx.dat".format(serial)
with open(path, 'r') as f:
for i in f.readlines():
record = i.strip().split()
if record[0][0] != "#":
for j in range(1, num + 1):
series[str(j)].append(int(record[j]))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
for k in range(1, num + 1):
ax1 = fig.add_subplot(3, 3, k)
ax1.set_title('time series of replica exchange', fontsize=2)
ax1.set_xlabel('time(ps)', fontsize=2)
ax1.set_ylabel("Replica", fontsize=2)
ax1.scatter(range(1, exchanges + 1), series[str(k)], s=.1)
plt.show()
def repidx(self): # All temperatures experienced by a channel
start = 1
end = 1
num = 8
exchanges = 100000 * (end - start + 1)
series = dict()
for p in range(1, num + 1):
series[str(p)] = []
for serial in range(start, end + 1):
path = "/Users/erik/Desktop/MD_WWN/REMD/new_topo/repidx.dat".format(serial)
with open(path, 'r') as f:
for i in f.readlines():
record = i.strip().split()
if record[0][0] != "#":
for j in range(1, num + 1):
series[str(j)].append(int(record[j]))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
for k in range(1, num + 1):
ax1 = fig.add_subplot(3, 3, k)
ax1.set_title('time series of replica exchange', fontsize=2)
ax1.set_xlabel('time(ps)', fontsize=2)
ax1.set_ylabel("Replica", fontsize=2)
ax1.scatter(range(1, exchanges + 1), series[str(k)], s=.1)
plt.show()
def REMD_average(self):
series = np.load("./REMD_16_SASA.npy", allow_pickle=True).item()
score = series["2"]  # select the second replica
score_ave = []
for i in range(1, 21):
block_total = 0
for j in range(100 * (i - 1), 100 * i):
block_total += score[j]
score_ave.append(block_total / 100)  # mean score over each 100-frame block
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('average score of replica exchange', fontsize=20)
ax1.set_xlabel('frames', fontsize=20)
ax1.set_ylabel("score", fontsize=20)
ax1.bar([k for k in range(1, 21)], score_ave)
plt.show()
def recover_pdb(self, pdb_path, serial):
path = pdb_path.format(serial)
recover_pdb = []
start = 61  # residue numbering is rewritten starting from 62 for each chain
current_chain = 'A'
with open(path, 'r') as file:
for i in file.readlines():
record = i.strip()
if record[:4] == 'ATOM':
name = record[12:16].strip()
chain_ID = record[21]
# resSeq = record[22:26]
# print(chain_ID, resSeq)
# print(record)
if chain_ID == current_chain:
if name == 'N':
start += 1
else:
current_chain = chain_ID
start = 61
if name == 'N':
start += 1
record = record[:22] + "{:>4s}".format(str(start)) + record[26:] + '\n'
recover_pdb.append(record)
write_pdb = pdb_path.format(str(serial) + "_recover")
# print(write_pdb)
with open(write_pdb, 'a') as f:
for k in recover_pdb:
f.write(k)
def cal_COM(self, cors: np.ndarray, mass: list):  # centre of mass of a set of atomic coordinates
assert len(cors) == len(mass)
M = np.sum(mass)
add_mass = []
for i in range(len(mass)):
add_mass.append(cors[i] * mass[i])
add_mass = np.sum(np.array(add_mass), axis=0)
print(add_mass / M)
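# Note (sketch): this implements the standard centre-of-mass formula
#   COM = sum_i(m_i * r_i) / sum_i(m_i);
# e.g. two unit masses at (0, 0, 0) and (2, 0, 0) give COM = (1, 0, 0).
# An equivalent vectorised form would be np.average(cors, axis=0, weights=mass);
# note that the method above only prints the result rather than returning it.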
def output_COM_restraint(self):
atoms = ["N", "CA", "C"]
len_chain_A = 57
len_total = 74
# for i in range(len_chain_A+1, len_total+1):
# print(i,",", i,",", i,",",end="", sep="")
for i in range(1, (len_total - len_chain_A) * 3 + 1):
print("grnam2({0})='{1}'".format(i, atoms[(i - 1) % 3]), ",", sep="", end="")
def Kdist_plot(self):
start = 1
end = 6
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
for k in range(start, end + 1):
path = "/Users/erik/Desktop/MD_WWN/REMD/new_topo/"
filename = "Kdist.{0}.dat"
distance = []
with open(path + filename.format(k)) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0:  # a blank line marks the end of the data; stop reading
break
if record[0] not in ["#", "@"]:
li = record.split()
distance.append(float(li[1]))  # already in Å
ax1.set_title('dbscan kdist', fontsize=2)
ax1.set_xlabel('frames', fontsize=2)
ax1.set_ylabel("Distance", fontsize=2)
ax1.plot(range(len(distance)), distance, label=str(k))
plt.legend()
plt.show()
def cnumvtime(self):
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
path = "/Users/erik/Desktop/MD_WWN/REMD/new_topo/repre_310.0K/cnumvtime.dat"
cnum = []
with open(path) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0:  # a blank line marks the end of the data; stop reading
break
if record[0] not in ["#", "@"]:
li = record.split()
cnum.append(float(li[1]))
plt.ylim((-5, 20))
ax1.set_title('cluster', fontsize=2)
ax1.set_xlabel('frames', fontsize=2)
ax1.set_ylabel("cnum", fontsize=2)
ax1.scatter(range(len(cnum)), cnum, s=.1)
plt.show()
def find_lowest_ESeq(self):
path = "/Users/erik/Desktop/RAF/designlog"
E = []
with open(path, 'r') as file:
for i in file.readlines():
record = i.strip().split()
E.append(float(record[-2]))
E = np.array(E)
print(np.argsort(E))  # indices of the designs sorted by energy, ascending
print(E[0])
print(E[99])
def hbond_plot_gmx(self):
num = 1
WD = "/Users/erik/Desktop/RAF"
group1 = "cry_repacking_4g0n"
group2 = "cry_dRafX6"
temperatures = ["344K"]
filenames = ["hbond_SS", "hbond_SM", "hbond_SW", "hbond_MW"]
paths = ["/".join((WD, "HBOND")),
"/".join((WD, "HBOND", "{0}_vs_{1}".format(group1, group2)))]
for dir_name in paths:
if not os.path.exists(dir_name):
os.mkdir(dir_name)
for temperature in temperatures:
fig, ax_arr = plt.subplots(2, 2, figsize=(15, 8))
for filename in range(len(filenames)):
hbondgroup1 = []
hbondgroup2 = []
for k in range(1, num + 1):
pathgroup1 = "/".join((WD, group1, temperature, str(k)))
pathgroup2 = "/".join((WD, group2, temperature, str(k)))
pathgroup1 = pathgroup1 + "/{0}.xvg".format(filenames[filename])
pathgroup2 = pathgroup2 + "/{0}.xvg".format(filenames[filename])
# print(pathgroup1)
# print(pathgroup2)
with open(pathgroup1) as file:
for j in file.readlines():
record = j.strip()
if len(record) == 0:
break
if record[0] not in ["#", "@"]:
li = record.split()
hbondgroup1.append(int(li[1]))
with open(pathgroup2) as file:
for j in file.readlines():
record = j.strip()
if len(record) == 0:
break
if record[0] not in ["#", "@"]:
li = record.split()
hbondgroup2.append(int(li[1]))
# print(len(hbondgroup1))
# print(len(hbondgroup2))
"""ax1 = fig.add_subplot(2, 2, filename+1)
ax1.cla()
ax1.set_title(filenames[filename], fontsize=20)
ax1.set_xlabel('hbond_num', fontsize=2)
ax1.set_ylabel("freq", fontsize=2)"""
sns.kdeplot(np.array(hbondgroup1), shade=True, color="blue", bw_method=.3,
ax=ax_arr[filename // 2][filename % 2]).set_title(filenames[filename])
sns.kdeplot(np.array(hbondgroup2), shade=True, color="red", bw_method=.3,
ax=ax_arr[filename // 2][filename % 2])
# plt.show()
plt.savefig("/".join((WD, "HBOND", "{0}_vs_{1}".format(group1, group2), temperature + ".png")))
# ax1.hist(hbond_cry_repacking, bins=100, color="green")
# ax1.hist(hbond_crystal_WT, bins=100, color="blue")
def gather_dihedral_atom_singChain(self, path, type=None):
result = []
atoms = ["CA", "N", "C"]
atom_cor, atom_nam = self.readHeavyAtom_singleChain(path)
ang_atom_cor = []
self.dihedral_atom_order(atom_nam)
for k in range(len(atom_nam)):
if atom_nam[k].split("-")[2] in atoms: # Add the coordinates of the atoms that make up the dihedral angle
ang_atom_cor.append(atom_cor[k])
if type == "Phi":
ang_atom_cor.reverse()
ang_atom_cor = np.array(ang_atom_cor)
ang_residue = []
for m in range(1, int(ang_atom_cor.shape[0] / 3) + 1):
ang_residue.append(ang_atom_cor[3 * (m - 1):3 * m + 1])
for q in ang_residue:
result.append(self.dihedral(q[0], q[1], q[2], q[3]) if q.shape[0] == 4 else 360)
if type == "Phi":
result.reverse()
return result
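# Reference note (standard definitions, not derived from this code): each backbone
# dihedral is built from four consecutive backbone atoms -- phi(i) from
# C(i-1)-N(i)-CA(i)-C(i) and psi(i) from N(i)-CA(i)-C(i)-N(i+1) -- which is why the
# coordinates are grouped into overlapping windows of four (indices 3*(m-1) through
# 3*m) and why a terminal residue whose window has fewer than four atoms gets the
# sentinel value 360 above.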
def rmsd_plot_gmx_inter(self): # unit is Å
# crystal_WT
# dRafX6
num = 2
WD = "/Users/erik/Desktop/RAF"
group = "crystal_WT"
temperatures = ["384K"]
interval = 0.02 # ns
for temperature in temperatures:
fig = plt.figure(num=1, figsize=(15, 8), dpi=200)
dir_name = "/".join((WD, group, temperature, "RMSD"))
print(temperature + ":")
list_ave = []
for k in range(11, 13):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
path = "/".join((WD, group, temperature, str(k), "rmsd_500ns.xvg"))
print(path)
rms = self.read_rmsd_gmx(path)
average_rms = np.mean(rms)
list_ave.append(average_rms)
print(average_rms, len(rms))
ax1 = fig.add_subplot(2, 3, k - 10)
ax1.cla()
ax1.set_title(group + '_' + temperature, fontsize=20)
ax1.set_xlabel('time(ns)', fontsize=2)
ax1.set_ylabel("Backbone RMSD(Å)", fontsize=10)
ax1.scatter(np.array(range(len(rms))) * interval, rms, s=.1)
ax1.plot([0, 500], [average_rms, average_rms], color="red")
print("ave:", np.mean(list_ave))
plt.savefig(dir_name + "/rmsd_5.png")
# plt.legend()
# plt.show()
def rmsd_plot_gmx_intra(self): # unit is Å
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
target = 1 # the serial of simulation
ax1.set_title('Figure', fontsize=20)
ax1.set_xlabel('Time(ns)', fontsize=20)
ax1.set_ylabel("Backbone RMSD(Å)", fontsize=20)
interval = 0.02 # ns
path_1 = "/Users/erik/Desktop/RAF/crystal_WT/test/{0}/".format(target)
path_2 = "/Users/erik/Desktop/RAF/cry_repacking/test/{0}/".format(target)
filename = "rmsd.xvg"
frame = 0
rms_1 = []
time_1 = []
with open(path_1 + filename) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0:
break
if record[0] not in ["#", "@"]:
li = record.split()
rms_1.append(float(li[1]) * 10) # Å
time_1.append(frame * interval)
frame += 1
frame = 0
rms_2 = []
time_2 = []
with open(path_2 + filename) as f:
for j in f.readlines():
record = j.strip()
if len(record) == 0:
break
if record[0] not in ["#", "@"]:
li = record.split()
rms_2.append(float(li[1]) * 10) # Å
time_2.append(frame * interval)
frame += 1
ax1.scatter(time_1, rms_1, s=.8, label="crystal_WT")
ax1.scatter(time_2, rms_2, s=.8, label="cry_repacking")
plt.legend()
plt.show()
def dih_RAF(self):
crystal_WT_phi = []
crystal_WT_psi = []
cry_repacking_phi = []
cry_repacking_psi = []
serial = 5
num = 10
target = "psi"
temperature = "test"
# Residue number
Strand = [[4, 5, 6, 7, 8], [15, 16, 17, 18], [43, 44, 45, 46, 47], [72, 73, 74, 75, 76, 77]]
Alphahelix = [[25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36], [65, 66, 67, 68]]
WT_seq = ['THR', 'SER', 'ASN', 'THR', 'ILE', 'ARG', 'VAL', 'PHE', 'LEU', 'PRO', 'ASN', 'LYS', 'GLN', 'ARG',
'THR', 'VAL', 'VAL', 'ASN', 'VAL', 'ARG', 'ASN', 'GLY', 'MET', 'SER', 'LEU', 'HIS', 'ASP', 'CYS',
'LEU', 'MET', 'LYS', 'ALA', 'LEU', 'LYS', 'VAL', 'ARG', 'GLY', 'LEU', 'GLN', 'PRO', 'GLU', 'CYS',
'CYS', 'ALA', 'VAL', 'PHE', 'ARG', 'LEU', 'LEU', 'HIS', 'GLU', 'HIS', 'LYS', 'GLY', 'LYS', 'LYS',
'ALA', 'ARG', 'LEU', 'ASP', 'TRP', 'ASN', 'THR', 'ASP', 'ALA', 'ALA', 'SER', 'LEU', 'ILE', 'GLY',
'GLU', 'GLU', 'LEU', 'GLN', 'VAL', 'ASP', 'PHE', 'LEU']
repacking_seq = ['ALA', 'ASP', 'ARG', 'THR', 'ILE', 'GLU', 'VAL', 'GLU', 'LEU', 'PRO', 'ASN', 'LYS', 'GLN',
'ARG', 'THR', 'VAL', 'ILE', 'ASN', 'VAL', 'ARG', 'PRO', 'GLY', 'LEU', 'THR', 'LEU', 'LYS',
'GLU', 'ALA', 'LEU', 'LYS', 'LYS', 'ALA', 'LEU', 'LYS', 'VAL', 'ARG', 'GLY', 'ILE', 'ASP',
'PRO', 'ASN', 'LYS', 'VAL', 'GLN', 'VAL', 'TYR', 'LEU', 'LEU', 'LEU', 'SER', 'GLY', 'ASP',
'ASP', 'GLY', 'ALA', 'GLU', 'GLN', 'PRO', 'LEU', 'SER', 'LEU', 'ASN', 'HIS', 'PRO', 'ALA',
'GLU', 'ARG', 'LEU', 'ILE', 'GLY', 'LYS', 'LYS', 'LEU', 'LYS', 'VAL', 'VAL', 'PRO', 'LEU']
for k in range(1, serial + 1):
for i in range(1, num + 1):
sample = np.load("/Users/erik/Desktop/RAF/crystal_WT/{2}/{1}/phi_{0}.npy".format(i, k, temperature),
allow_pickle=True)
crystal_WT_phi += sample.tolist()
for i in range(1, num + 1):
sample = np.load("/Users/erik/Desktop/RAF/crystal_WT/{2}/{1}/psi_{0}.npy".format(i, k, temperature),
allow_pickle=True)
crystal_WT_psi += sample.tolist()
for i in range(1, num + 1):
sample = np.load("/Users/erik/Desktop/RAF/cry_repacking/{2}/{1}/phi_{0}.npy".format(i, k, temperature),
allow_pickle=True)
cry_repacking_phi += sample.tolist()
for i in range(1, num + 1):
sample = np.load("/Users/erik/Desktop/RAF/cry_repacking/{2}/{1}/psi_{0}.npy".format(i, k, temperature),
allow_pickle=True)
cry_repacking_psi += sample.tolist()
seqlen = len(WT_seq)
samplelen = len(crystal_WT_phi)
print(seqlen, samplelen)
crystal_WT_phi = np.array(crystal_WT_phi)
crystal_WT_psi = np.array(crystal_WT_psi)
cry_repacking_phi = np.array(cry_repacking_phi)
cry_repacking_psi = np.array(cry_repacking_psi)
# print(np.std(crystal_WT_psi[:,0]))
crystal_WT_phi_mean = []
crystal_WT_psi_mean = []
cry_repacking_phi_mean = []
cry_repacking_psi_mean = []
crystal_WT_phi_std = []
crystal_WT_psi_std = []
cry_repacking_phi_std = []
cry_repacking_psi_std = []
# test
"""i = 11
temp_WT = crystal_WT_phi[:, i]
temp_repack = cry_repacking_phi[:, i]
mode_WT = stats.mode(temp_WT.astype(np.int64))[0][0]
mode_repack = stats.mode(temp_repack.astype(np.int64))[0][0]
diff_WT = temp_WT - mode_WT
diff_repack = temp_repack - mode_repack
for p in range(samplelen):
if diff_WT[p] < -180:
diff_WT[p] = 360 + diff_WT[p]
if diff_WT[p] > 180:
diff_WT[p] = diff_WT[p] - 360
for p in range(samplelen):
if diff_repack[p] < -180:
diff_repack[p] = 360 + diff_repack[p]
if diff_repack[p] > 180:
diff_repack[p] = diff_repack[p] - 360
temp_WT = diff_WT + mode_WT
temp_repack = diff_repack + mode_repack
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure', fontsize=20)
ax1.set_xlabel('residues', fontsize=20)
ax1.set_ylabel("Dihedral", fontsize=20)
ax1.hist(temp_WT, bins=100)"""
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure', fontsize=20)
ax1.set_xlabel('residues', fontsize=20)
ax1.set_ylabel("Dihedral", fontsize=20)
if target == "psi":
# psi
for i in range(seqlen):
temp_WT = crystal_WT_psi[:, i]
temp_repack = cry_repacking_psi[:, i]
mode_WT = stats.mode(temp_WT.astype(np.int64))[0][0]
mode_repack = stats.mode(temp_repack.astype(np.int64))[0][0]
diff_WT = temp_WT - mode_WT
diff_repack = temp_repack - mode_repack
for p in range(samplelen):
if diff_WT[p] < -180:
diff_WT[p] = 360 + diff_WT[p]
if diff_WT[p] > 180:
diff_WT[p] = diff_WT[p] - 360
for p in range(samplelen):
if diff_repack[p] < -180:
diff_repack[p] = 360 + diff_repack[p]
if diff_repack[p] > 180:
diff_repack[p] = diff_repack[p] - 360
temp_WT = diff_WT + mode_WT
temp_repack = diff_repack + mode_repack
crystal_WT_psi_mean.append(np.mean(temp_WT))
cry_repacking_psi_mean.append(np.mean(temp_repack))
crystal_WT_psi_std.append(np.std(temp_WT))
cry_repacking_psi_std.append(np.std(temp_repack))
ax1.errorbar(range(1, len(crystal_WT_psi_mean) + 1), crystal_WT_psi_mean, yerr=crystal_WT_psi_std,
fmt="o", color="blue")
ax1.errorbar(range(1, len(cry_repacking_psi_mean) + 1), cry_repacking_psi_mean,
yerr=cry_repacking_psi_std, fmt="^", color="red", elinewidth=2)
elif target == "phi":
# phi
for i in range(seqlen):
temp_WT = crystal_WT_phi[:, i]
temp_repack = cry_repacking_phi[:, i]
mode_WT = stats.mode(temp_WT.astype(np.int64))[0][0]
mode_repack = stats.mode(temp_repack.astype(np.int64))[0][0]
diff_WT = temp_WT - mode_WT
diff_repack = temp_repack - mode_repack
for p in range(samplelen):
if diff_WT[p] < -180:
diff_WT[p] = 360 + diff_WT[p]
if diff_WT[p] > 180:
diff_WT[p] = diff_WT[p] - 360
for p in range(samplelen):
if diff_repack[p] < -180:
diff_repack[p] = 360 + diff_repack[p]
if diff_repack[p] > 180:
diff_repack[p] = diff_repack[p] - 360
temp_WT = diff_WT + mode_WT
temp_repack = diff_repack + mode_repack
crystal_WT_phi_mean.append(np.mean(temp_WT))
cry_repacking_phi_mean.append(np.mean(temp_repack))
crystal_WT_phi_std.append(np.std(temp_WT))
cry_repacking_phi_std.append(np.std(temp_repack))
ax1.errorbar(range(1, len(crystal_WT_phi_mean) + 1), crystal_WT_phi_mean, yerr=crystal_WT_phi_std,
fmt="o", color="blue", )
ax1.errorbar(range(1, len(cry_repacking_phi_mean) + 1), cry_repacking_phi_mean,
yerr=cry_repacking_phi_std, fmt="^", color="red", elinewidth=2)
"""if target == "phi":
for i in range(crystal_WT_phi.shape[-1]):
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
ax1.set_title('Phi', fontsize=20)
ax1.set_xlabel('', fontsize=20)
ax1.set_ylabel("{0}_{1}\nWT".format(i+1, WT_seq[i]), fontsize=20, rotation=0)
ax2.set_title('', fontsize=20)
ax2.set_xlabel('residues', fontsize=20)
ax2.set_ylabel("{0}_{1}\ndRafX6".format(i+1, repacking_seq[i]), fontsize=20, rotation=0)
# test1 = crystal_WT_phi[:,i]
# test2 = cry_repacking_phi[:,i]
ax1.hist(crystal_WT_phi[:,i], bins=100)
ax2.hist(cry_repacking_phi[:,i], bins=100)
plt.savefig("/Users/erik/Desktop/RAF/compare_WT_vs_repacking/{1}/dih/phi/{0}_phi.png".format(i + 1, temperature))
ax1.cla()
ax2.cla()
elif target == "psi":
for i in range(crystal_WT_psi.shape[-1]):
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
ax1.set_title('Psi', fontsize=20)
ax1.set_xlabel('', fontsize=20)
ax1.set_ylabel("{0}_{1}\nWT".format(i+1, WT_seq[i]), fontsize=15, rotation=0)
ax2.set_title('', fontsize=20)
ax2.set_xlabel('residues', fontsize=20)
ax2.set_ylabel("{0}_{1}\ndRafX6".format(i+1, repacking_seq[i]), fontsize=15, rotation=0)
# test1 = crystal_WT_phi[:,i]
# test2 = cry_repacking_phi[:,i]
ax1.hist(crystal_WT_psi[:,i], bins=100)
ax2.hist(cry_repacking_psi[:,i], bins=100)
plt.savefig("/Users/erik/Desktop/RAF/compare_WT_vs_repacking/{1}/dih/psi/{0}_psi.png".format(i+1, temperature))
ax1.cla()
ax2.cla()"""
# strand
for strand in Strand:
ax1.plot(strand, [-220] * len(strand), color="black")
# alpha
for alpha in Alphahelix:
ax1.plot(alpha, [-220] * len(alpha), color="black")
plt.show()
def output_aa_name(self, path):
# Read amino acid sequence
print("Reading sequence:", path)
aa_nam = []
cur_resSeq = 0
with open(path, 'r') as f:
for i in f.readlines():
record = i.strip()
atom = record[:4].strip()
if atom != "ATOM":
continue
resName = self.processIon(record[17:20].strip()) # PRO
resSeq = int(record[22:26].strip()) # 3
if resName not in self.aa:
continue
if resSeq != cur_resSeq:
aa_nam.append(resName)
cur_resSeq = resSeq
return aa_nam
def plot_PCA_2d(self):
path = "/Users/erik/Desktop/RAF/crystal_WT/test/1/2d.xvg"
projection_1 = []
projection_2 = []
with open(path) as file:
for j in file.readlines():
record = j.strip()
if len(record) == 0:
break
if record[0] not in ["#", "@"]:
li = record.split()
projection_1.append(float(li[0]))
projection_2.append(float(li[1]))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Projection', fontsize=20)
ax1.set_xlabel('Projection_1', fontsize=20)
ax1.set_ylabel("Projection_2", fontsize=20)
ax1.scatter(projection_1, projection_2, s=.5)
plt.show()
def plot_PCA_3d(self):
path = "/Users/erik/Desktop/RAF/crystal_WT/test/1/3dproj.pdb"
projection_1 = []
projection_2 = []
projection_3 = []
with open(path, 'r') as file:
for j in file.readlines():
record = j.strip().split()
if record[0] != "ATOM":
continue
projection_1.append(float(record[5]))
projection_2.append(float(record[6]))
projection_3.append(float(record[7]))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Projection', fontsize=20)
ax1.set_xlabel('Projection_1', fontsize=20)
ax1.set_ylabel("Projection_2", fontsize=20)
ax1.scatter(projection_1, projection_2, s=.5)
plt.show()
def dih_RAF_5(self):
WD = "/Users/erik/Desktop/RAF"
serial = 5
num = 10
temperature = "384K"
group = "dRafX6"
paths = ['/'.join((WD, group, temperature, "dih_5")),
'/'.join((WD, group, temperature, "dih_5", "phi")),
'/'.join((WD, group, temperature, "dih_5", "psi"))]
for dir_name in paths:
if not os.path.exists(dir_name):
os.mkdir(dir_name)
# Residue number
Strand = [[4, 5, 6, 7, 8], [15, 16, 17, 18], [43, 44, 45, 46, 47], [72, 73, 74, 75, 76, 77]]
Alphahelix = [[25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36], [65, 66, 67, 68]]
WT_seq = ['THR', 'SER', 'ASN', 'THR', 'ILE', 'ARG', 'VAL', 'PHE', 'LEU', 'PRO', 'ASN', 'LYS', 'GLN', 'ARG',
'THR', 'VAL', 'VAL', 'ASN', 'VAL', 'ARG', 'ASN', 'GLY', 'MET', 'SER', 'LEU', 'HIS', 'ASP', 'CYS',
'LEU', 'MET', 'LYS', 'ALA', 'LEU', 'LYS', 'VAL', 'ARG', 'GLY', 'LEU', 'GLN', 'PRO', 'GLU', 'CYS',
'CYS', 'ALA', 'VAL', 'PHE', 'ARG', 'LEU', 'LEU', 'HIS', 'GLU', 'HIS', 'LYS', 'GLY', 'LYS', 'LYS',
'ALA', 'ARG', 'LEU', 'ASP', 'TRP', 'ASN', 'THR', 'ASP', 'ALA', 'ALA', 'SER', 'LEU', 'ILE', 'GLY',
'GLU', 'GLU', 'LEU', 'GLN', 'VAL', 'ASP', 'PHE', 'LEU']
repacking_seq = ['ALA', 'ASP', 'ARG', 'THR', 'ILE', 'GLU', 'VAL', 'GLU', 'LEU', 'PRO', 'ASN', 'LYS', 'GLN',
'ARG', 'THR', 'VAL', 'ILE', 'ASN', 'VAL', 'ARG', 'PRO', 'GLY', 'LEU', 'THR', 'LEU', 'LYS',
'GLU', 'ALA', 'LEU', 'LYS', 'LYS', 'ALA', 'LEU', 'LYS', 'VAL', 'ARG', 'GLY', 'ILE', 'ASP',
'PRO', 'ASN', 'LYS', 'VAL', 'GLN', 'VAL', 'TYR', 'LEU', 'LEU', 'LEU', 'SER', 'GLY', 'ASP',
'ASP', 'GLY', 'ALA', 'GLU', 'GLN', 'PRO', 'LEU', 'SER', 'LEU', 'ASN', 'HIS', 'PRO', 'ALA',
'GLU', 'ARG', 'LEU', 'ILE', 'GLY', 'LYS', 'LYS', 'LEU', 'LYS', 'VAL', 'VAL', 'PRO', 'LEU']
for p in range(len(WT_seq)): # Amino acid site
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
for k in range(1, serial + 1):
crystal_WT_phi = []
for i in range(1, num + 1):
sample = np.load("/Users/erik/Desktop/RAF/{3}/{2}/{1}/phi_{0}.npy".format(i, k, temperature, group),
allow_pickle=True).tolist()
crystal_WT_phi += sample
crystal_WT_phi = np.array(crystal_WT_phi)
ax1 = fig.add_subplot(2, 3, k)
ax1.cla()
ax1.set_xlabel('residues', fontsize=5)
ax1.set_ylabel("Dihedral", fontsize=5)
ax1.hist(crystal_WT_phi[:, p], bins=100)
plt.savefig(
"/Users/erik/Desktop/RAF/{2}/{1}/dih_5/phi/{0}_phi.png".format(p + 1, temperature, group))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
for k in range(1, serial + 1):
crystal_WT_psi = []
for i in range(1, num + 1):
sample = np.load("/Users/erik/Desktop/RAF/{3}/{2}/{1}/psi_{0}.npy".format(i, k, temperature, group),
allow_pickle=True).tolist()
crystal_WT_psi += sample
crystal_WT_psi = np.array(crystal_WT_psi)
ax1 = fig.add_subplot(2, 3, k)
ax1.cla()
ax1.set_xlabel('residues', fontsize=5)
ax1.set_ylabel("Dihedral", fontsize=5)
ax1.hist(crystal_WT_psi[:, p], bins=100)
plt.savefig(
"/Users/erik/Desktop/RAF/{2}/{1}/dih_5/psi/{0}_psi.png".format(p + 1, temperature, group))
seqlen = len(WT_seq)
samplelen = len(crystal_WT_phi)
print(seqlen, samplelen)
def LJ_SR_5(self):
serial = 5
group = "dRafX6"
temperature = "384K"
path = "/Users/erik/Desktop/RAF/{0}/{1}/{2}/E_LJ_SR.xvg"
inter = 20
dt = 0.02 # ns
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
for k in range(1, serial + 1):
LJ_SR = []
with open(path.format(group, temperature, k), "r") as file:
for i in file.readlines():
if i[0] not in ["#", "@"]:
record = i.strip().split()
LJ_SR.append(float(record[-1]))
LJ_SR = self.space_data(li=LJ_SR, interval=inter)
print(len(LJ_SR))
ax1 = fig.add_subplot(2, 3, k)
ax1.cla()
ax1.set_xlabel('time(ns)', fontsize=5)
ax1.set_ylabel("LJ_SR", fontsize=5)
ax1.plot(np.arange(len(LJ_SR)) * (dt * inter), LJ_SR)
plt.show()
def E_Tem(self):
serial = 5
group = "dRafX6"
temperature = "384K"
path = "/Users/erik/Desktop/RAF/{0}/{1}/{2}/E_Tem.xvg"
inter = 20
dt = 0.02 # ns
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
for k in range(1, serial + 1):
Tem = []
with open(path.format(group, temperature, k), "r") as file:
for i in file.readlines():
if i[0] not in ["#", "@"]:
record = i.strip().split()
Tem.append(float(record[-1]))
Tem = self.space_data(li=Tem, interval=inter)
print(len(Tem))
ax1 = fig.add_subplot(2, 3, k)
ax1.cla()
ax1.set_xlabel('time(ns)', fontsize=5)
ax1.set_ylabel("Tem", fontsize=5)
ax1.plot(np.arange(len(Tem)) * (dt * inter), Tem)
plt.show()
def E_Coulomb_SR(self):
serial = 5
group = "dRafX6"
temperature = "384K"
path = "/Users/erik/Desktop/RAF/{0}/{1}/{2}/E_Coulomb_SR.xvg"
inter = 20
dt = 0.02 # ns
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
for k in range(1, serial + 1):
Coulomb_SR = []
with open(path.format(group, temperature, k), "r") as file:
for i in file.readlines():
if i[0] not in ["#", "@"]:
record = i.strip().split()
Coulomb_SR.append(float(record[-1]))
Coulomb_SR = self.space_data(li=Coulomb_SR, interval=inter)
print(len(Coulomb_SR))
ax1 = fig.add_subplot(2, 3, k)
ax1.cla()
ax1.set_xlabel('time(ns)', fontsize=5)
ax1.set_ylabel("Coulomb_SR", fontsize=5)
ax1.plot(np.arange(len(Coulomb_SR)) * (dt * inter), Coulomb_SR)
plt.show()
def construct_rmsd_matrix(self): # used for ave.pdb for now
part = 4 # 4 for backbone(BB); 8 for sidechain(SC) in most cases
WD = "/home/caofan/RAF/"
group = "crystal_WT" # "dRafX6"
temperatures = ["300K", "344K", "384K"]
rmsd_mat = np.zeros(shape=(15, 15), dtype=np.float32)
paths = []
names = []
serial = 5
for temperature in temperatures:
for k in range(1, serial + 1):
paths.append(WD
+ '{0}/{1}/{2}/'.format(group, temperature, k))
names.append("{0}/{1}/{2}".format(group, temperature, k))
for p in range(len(paths)):
path1 = paths[p]
for q in range(p + 1, len(paths)):
path2 = paths[q]
rmsd_mat[p][q] = self.rmsd_between(path1=path1 + "pro_ave.pdb", path2=path2 + "pro_ave.pdb", part=part)
print(rmsd_mat)
np.save("./rmsd_{0}_{1}.npy".format(group, "BB" if part == 4 else "SC"), rmsd_mat)
rmsd_mat = np.load("./rmsd_{0}_{1}.npy".format(group, "BB" if part == 4 else "SC"), allow_pickle=True)
df = pd.DataFrame(rmsd_mat)
df.columns = names
df.index = names
df.to_csv("./rmsd_{0}_{1}.csv".format(group, "BB" if part == 4 else "SC"))
def self_rmsd(self):
part = 4 # 4 for backbone(BB); 8 for sidechain(SC) in most cases
WD = "/home/caofan/RAF/"
group = "crystal_WT" # "dRafX6"
temperatures = ["300K", "344K", "384K"]
rmsd_mat = np.zeros(shape=(3, 5), dtype=np.float32)
serial = 5
for temperature in range(len(temperatures)):
for k in range(1, serial + 1):
path = WD + '{0}/{1}/{2}/'.format(group, temperatures[temperature], k)
rmsd_mat[temperature][k - 1] = self.rmsd_between(
path1=path + "md{0}.tpr".format(temperatures[temperature]), path2=path + "pro_ave.pdb", part=part)
print(rmsd_mat)
np.save("./self_rmsd_{0}_{1}.npy".format(group, "BB" if part == 4 else "SC"), rmsd_mat)
rmsd_mat = np.load("./self_rmsd_{0}_{1}.npy".format(group, "BB" if part == 4 else "SC"), allow_pickle=True)
df = | pd.DataFrame(rmsd_mat) | pandas.DataFrame |
from collections import Counter
import pandas as pd
import pytest
from simplekv import KeyValueStore
from kartothek.api.discover import (
discover_cube,
discover_datasets,
discover_datasets_unchecked,
discover_ktk_cube_dataset_ids,
)
from kartothek.core.cube.constants import (
KTK_CUBE_DF_SERIALIZER,
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
KTK_CUBE_METADATA_STORAGE_FORMAT,
KTK_CUBE_METADATA_SUPPRESS_INDEX_ON,
KTK_CUBE_METADATA_VERSION,
)
from kartothek.core.cube.cube import Cube
from kartothek.core.uuid import gen_uuid
from kartothek.io.eager import (
store_dataframes_as_dataset,
update_dataset_from_dataframes,
)
from kartothek.io_components.metapartition import MetaPartition
@pytest.fixture
def cube():
return Cube(
dimension_columns=["x", "y"],
partition_columns=["p", "q"],
uuid_prefix="cube",
index_columns=["i1"],
seed_dataset="myseed",
)
def store_data(
cube,
function_store,
df,
name,
partition_on="default",
metadata_version=KTK_CUBE_METADATA_VERSION,
metadata_storage_format=KTK_CUBE_METADATA_STORAGE_FORMAT,
metadata=None,
overwrite=False,
new_ktk_cube_metadata=True,
write_suppress_index_on=True,
):
if partition_on == "default":
partition_on = cube.partition_columns
if isinstance(df, pd.DataFrame):
mp = MetaPartition(label=gen_uuid(), data=df, metadata_version=metadata_version)
indices_to_build = set(cube.index_columns) & set(df.columns)
if name == cube.seed_dataset:
indices_to_build |= set(cube.dimension_columns) - set(
cube.suppress_index_on
)
mp = mp.build_indices(indices_to_build)
dfs = mp
else:
assert isinstance(df, MetaPartition)
assert df.metadata_version == metadata_version
dfs = df
if metadata is None:
metadata = {
KTK_CUBE_METADATA_DIMENSION_COLUMNS: cube.dimension_columns,
KTK_CUBE_METADATA_KEY_IS_SEED: (name == cube.seed_dataset),
}
if new_ktk_cube_metadata:
metadata.update(
{KTK_CUBE_METADATA_PARTITION_COLUMNS: cube.partition_columns}
)
if write_suppress_index_on:
metadata.update(
{KTK_CUBE_METADATA_SUPPRESS_INDEX_ON: list(cube.suppress_index_on)}
)
return store_dataframes_as_dataset(
store=function_store,
dataset_uuid=cube.ktk_dataset_uuid(name),
dfs=dfs,
partition_on=list(partition_on) if partition_on else None,
metadata_storage_format=metadata_storage_format,
metadata_version=metadata_version,
df_serializer=KTK_CUBE_DF_SERIALIZER,
metadata=metadata,
overwrite=overwrite,
)
def assert_datasets_equal(left, right):
assert set(left.keys()) == set(right.keys())
for k in left.keys():
ds_l = left[k]
ds_r = right[k]
assert ds_l.uuid == ds_r.uuid
def assert_dataset_issubset(superset, subset):
assert set(subset.keys()).issubset(set(superset.keys()))
for k in subset.keys():
assert subset[k].uuid == superset[k].uuid
def test_discover_ktk_cube_dataset_ids(function_store):
cube = Cube(
dimension_columns=["dim"],
partition_columns=["part"],
uuid_prefix="cube",
seed_dataset="seed",
)
ktk_cube_dataset_ids = ["A", "B", "C"]
for ktk_cube_id in ktk_cube_dataset_ids:
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"dim": [0], "part": [0]}),
name=ktk_cube_id,
)
collected_ktk_cube_dataset_ids = discover_ktk_cube_dataset_ids(
cube.uuid_prefix, function_store()
)
assert collected_ktk_cube_dataset_ids == set(ktk_cube_dataset_ids)
class TestDiscoverDatasetsUnchecked:
def test_simple(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
),
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_no_seed(self, cube, function_store):
expected = {
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_other_files(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
function_store().put(cube.ktk_dataset_uuid("enrich") + "/foo", b"")
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_no_common_metadata(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
keys = set(function_store().keys())
metadata_key = cube.ktk_dataset_uuid("enrich") + ".by-dataset-metadata.json"
assert metadata_key in keys
for k in keys:
if (k != metadata_key) and k.startswith(cube.ktk_dataset_uuid("enrich")):
function_store().delete(k)
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_filter_partial_datasets_found(self, cube, function_store):
enrich_dataset = store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="mytable",
)
expected = {"enrich": enrich_dataset}
actual = discover_datasets_unchecked(
cube.uuid_prefix, function_store, filter_ktk_cube_dataset_ids=["enrich"]
)
assert_dataset_issubset(actual, expected)
def test_filter_no_datasets_found(self, cube, function_store):
actual = discover_datasets_unchecked(
cube.uuid_prefix, function_store, filter_ktk_cube_dataset_ids=["enrich"]
)
assert actual == {}
def test_msgpack_clean(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
metadata_storage_format="msgpack",
),
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_msgpack_priority(self, cube, function_store):
"""
json metadata files have priority in kartothek, so the discovery should respect this
"""
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
)
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v2": [0]}),
name=cube.seed_dataset,
overwrite=True,
)
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v3": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
overwrite=True,
)
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_msgpack_efficiency(self, cube, function_store):
"""
We should only iterate over the store once, even though we are looking for 2 suffixes.
Furthermore, we must only load every dataset once.
"""
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
overwrite=True,
)
class StoreMock(KeyValueStore):
def __init__(self, store):
self._store = store
self._iter_keys_called = 0
self._iter_prefixes_called = 0
self._get_called = Counter()
def iter_keys(self, prefix=""):
self._iter_keys_called += 1
return self._store.iter_keys(prefix)
def iter_prefixes(self, delimiter, prefix=""):
self._iter_prefixes_called += 1
return self._store.iter_prefixes(delimiter, prefix)
def get(self, key):
self._get_called[key] += 1
return self._store.get(key)
store = StoreMock(function_store())
discover_datasets_unchecked(cube.uuid_prefix, store)
assert store._iter_keys_called == 0
assert store._iter_prefixes_called == 1
assert max(store._get_called.values()) == 1
class TestDiscoverDatasets:
def test_seed_only(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_2_datasets(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df= | pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}) | pandas.DataFrame |
import pandas as pd
import sasoptpy as so
import requests
from subprocess import Popen, DEVNULL
# Solves the pre-season optimization problem
def get_data():
r = requests.get('https://fantasy.premierleague.com/api/bootstrap-static/')
fpl_data = r.json()
element_data = pd.DataFrame(fpl_data['elements'])
team_data = pd.DataFrame(fpl_data['teams'])
elements_team = pd.merge(element_data, team_data, left_on='team', right_on='id')
review_data = | pd.read_csv('../data/fplreview.csv') | pandas.read_csv |
import glob
import os
import pandas as pd
import yaml
from flatten_dict import flatten
from ensembler.p_tqdm import t_imap as mapper
import re
from functools import partial
from ensembler.Dataset import Dataset
from ensembler.datasets import Datasets
def process_file(file_path: str) -> pd.DataFrame:
file_dir = os.path.dirname(file_path)
job_hash = os.path.split(file_dir)[-1]
metrics = pd.read_csv(file_path)
config_file = os.path.join(file_dir, "config.yaml")
models = glob.glob(
os.path.join(file_dir, "lightning_logs", "**", "checkpoints", "*.ckpt"))
model_scores = [
float(re.findall(r'[-+]?[0-9]*\.?[0-9]+', os.path.basename(m))[-1])
for m in models
]
model_idx = model_scores.index(min(model_scores))
model = models[model_idx]
epoch, loss = re.findall(r'[-+]?[0-9]*\.?[0-9]+', os.path.basename(model))
epoch = int(epoch)
loss = float(loss)
with open(os.path.join(file_dir, "predict_time.txt")) as pt:
predict_time = float(pt.readline())
with open(config_file, 'r') as file:
config = yaml.safe_load(file)
config = flatten(config, reducer="underscore")
for key, val in config.items():
metrics[key] = val
metrics["job_hash"] = job_hash
metrics["epoch"] = epoch
metrics["loss"] = loss
metrics["predict_time"] = predict_time
return metrics
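# Illustrative sketch (assumes Lightning-style checkpoint names such as
# "epoch=12-val_loss=0.345.ckpt"): the numeric regex above would extract
# ["12", "0.345"], the checkpoint whose final number (the validation loss) is
# lowest is selected, and epoch/loss are then unpacked from that file name.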
def combine_metrics(in_dir: str):
dataset = Datasets["cityscapes"]
datamodule = Dataset(dataset=dataset, batch_size=1)
dataset = Datasets.get(dataset.value)
dataloader = datamodule.test_dataloader()
image_names = datamodule.test_data.dataset.get_image_names()
in_dir = os.path.abspath(in_dir)
job_hashes = [
d for d in os.listdir(in_dir)
if os.path.isdir(os.path.join(in_dir, d)) and
d not in ["ebms", "ensembles", "test", "val"]
]
metrics_files = glob.glob(os.path.join(in_dir, "**", "metrics.csv"))
metric_directories = [
os.path.split(os.path.dirname(f))[-1] for f in metrics_files
]
missing_metrics = [m for m in job_hashes if m not in metric_directories]
if missing_metrics:
raise FileExistsError(
"Could not find metrics for: {}".format(missing_metrics))
combined_metrics = []
for df in mapper(process_file, metrics_files):
combined_metrics.append(df)
combined_metrics = | pd.concat(combined_metrics, ignore_index=True) | pandas.concat |
import sys
sys.path.append('../')
#code below used to deal with special characters on the file path during read_csv()
sys._enablelegacywindowsfsencoding()
import numpy as np
import seaborn as sns
import pandas as pd
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pyswarms as ps
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from pyswarms.utils.plotters import plot_cost_history
# Define objective function
def f_per_particle(m, alpha):
"""Computes for the objective function per particle
Inputs
------
m : numpy.ndarray
Binary mask that can be obtained from BinaryPSO, will
be used to mask features.
alpha: float (default is 0.5)
Constant weight for trading-off classifier performance
and number of features
Returns
-------
numpy.ndarray
Computed objective function
"""
total_features = X.shape[1]
# Get the subset of the features from the binary mask
if np.count_nonzero(m) == 0:
#if the particle subset is only zeros, get the original set of attributes
X_subset = X
else:
X_subset = X[:,m==1]
#X_train, X_test, y_train, y_test = train_test_split(X_subset, y, test_size=0.20, random_state=None)
# Perform classification and store performance in P
#classifier.fit(X_train, y_train)
#P = (classifier.predict(X_test) == y_test).mean()
scores = cross_val_score(classifier, X_subset, y, cv=3)
#print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
P = scores.mean()
particleScore.append(P)
particleSize.append(X_subset.shape[1])
# Compute for the objective function
j = (alpha * (1.0 - P)
+ (1.0 - alpha) * (1 - (X_subset.shape[1] / total_features)))
#j = (alpha * (1.0 - P)) + (1 - alpha) * (1 - (total_features - X_subset.shape[1]) / total_features)
#print("Particle j: ", j)
return j
def f(x, alpha=0.9):
"""Higher-level method to do classification in the
whole swarm.
Inputs
------
x: numpy.ndarray of shape (n_particles, dimensions)
The swarm that will perform the search
Returns
-------
numpy.ndarray of shape (n_particles, )
The computed loss for each particle
"""
n_particles = x.shape[0]
j = [f_per_particle(x[i], alpha) for i in range(n_particles)]
#print("f j: ", j)
return np.array(j)
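# Illustrative usage sketch (kept commented out; X, y and classifier are assumed to
# be defined further below, and the option values mirror the pyswarms
# feature-selection example rather than tuned settings):
# options = {'c1': 0.5, 'c2': 0.5, 'w': 0.9, 'k': 30, 'p': 2}
# optimizer = ps.discrete.BinaryPSO(n_particles=30, dimensions=X.shape[1], options=options)
# cost, pos = optimizer.optimize(f, iters=100)
# X_selected = X[:, pos == 1]  # keep only the features chosen by the best position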
data_temp= | pd.read_csv('faults.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Defines the ForecastModel class, which encapsulates model functions used in
forecast model fitting, as well as their number of parameters and
initialisation parameters.
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python style guide
# http://sphinx.pocoo.org/rest.html - Use Restructured Text for
# docstrings
# -- Public Imports
import itertools
import logging
import numpy as np
import pandas as pd
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, \
MO, nearest_workday, next_monday, next_monday_or_tuesday, \
GoodFriday, EasterMonday, USFederalHolidayCalendar
from pandas.tseries.offsets import DateOffset
from datetime import datetime
# -- Private Imports
from anticipy import model_utils
# -- Globals
logger = logging.getLogger(__name__)
# Fourier model configuration
_dict_fourier_config = { # Default configuration for fourier-based models
'period': 365.25, # days in year
'harmonics': 10 # TODO: evaluate different harmonics values
}
_FOURIER_PERIOD = 365.25
_FOURIER_HARMONICS = 10 # TODO: evaluate different harmonics values
_FOURIER_K = (2.0 * np.pi / _FOURIER_PERIOD)
_FOURIER_I = np.arange(1, _FOURIER_HARMONICS + 1)
_FOURIER_DATE_ORIGIN = datetime(1970, 1, 1)
# -- Functions
# ---- Utility functions
def logger_info(msg, data):
# Convenience function for easier log typing
logger.info(msg + '\n%s', data)
def _get_f_init_params_default(n_params):
# Generate a default function for initialising model parameters: use
# random values between 0 and 1
return lambda a_x=None, a_y=None, a_date=None, is_mult=False:\
np.random.uniform(low=0.001, high=1, size=n_params)
def _get_f_bounds_default(n_params):
# Generate a default function for model parameter boundaries. Default
# boundaries are (-inf, inf)
return lambda a_x=None, a_y=None, a_date=None: (
n_params * [-np.inf], n_params * [np.inf])
def _get_f_add_2_f_models(forecast_model1, forecast_model2):
# Add model functions of 2 ForecastModels
def f_add_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=False,
**kwargs) +
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=False,
**kwargs))
return f_add_2_f_models
def _get_f_mult_2_f_models(forecast_model1, forecast_model2):
# Multiply model functions of 2 ForecastModels
def f_mult_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=True,
**kwargs) *
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=True,
**kwargs))
return f_mult_2_f_models
def _get_f_add_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# addition
def f_add_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=False),
f_init_params2(a_x, a_y, a_date, is_mult=False)])
return f_add_2_f_init_params
def _get_f_mult_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# multiplication
def f_mult_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=True),
f_init_params2(a_x, a_y, a_date, is_mult=True)])
return f_mult_2_f_init_params
def _get_f_concat_2_bounds(forecast_model1, forecast_model2):
# Compose parameter boundary functions of 2 ForecastModels
def f_add_2_f_bounds(a_x, a_y, a_date=None):
return np.concatenate(
(forecast_model1.f_bounds(
a_x, a_y, a_date), forecast_model2.f_bounds(
a_x, a_y, a_date)), axis=1)
return f_add_2_f_bounds
def _f_validate_input_default(a_x, a_y, a_date):
# Default input validation function for a ForecastModel. Always returns
# True
return True
def _as_list(l):
return l if isinstance(l, (list,)) else [l]
# Functions used to initialize cache variables in a ForecastModel
def _f_init_cache_a_month(a_x, a_date):
return a_date.month - 1
def _f_init_cache_a_weekday(a_x, a_date):
return a_date.weekday
def _f_init_cache_a_t_fourier(a_x, a_date):
# convert to days since epoch
t = (a_date - _FOURIER_DATE_ORIGIN).days.values
i = np.arange(1, _FOURIER_HARMONICS + 1)
a_tmp = _FOURIER_K * i.reshape(i.size, 1) * t
y = np.concatenate([np.sin(a_tmp), np.cos(a_tmp)])
return y
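# Sketch of the cached terms (added note, not original code): with t in days since
# 1970-01-01, the rows of y are sin(2*pi*i*t / 365.25) and cos(2*pi*i*t / 365.25)
# for i = 1..10, i.e. a (2 * harmonics) x len(a_date) matrix of yearly-seasonality
# basis functions that date-aware Fourier models can reuse.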
# Dictionary to store functions used to initialize cache variables
# in a ForecastModel
# This is shared across all ForecastModel instances
_dict_f_cache = dict(
a_month=_f_init_cache_a_month,
a_weekday=_f_init_cache_a_weekday,
a_t_fourier=_f_init_cache_a_t_fourier
)
# -- Classes
class ForecastModel:
"""
Class that encapsulates model functions for use in forecasting, as well as
their number of parameters and functions for parameter initialisation.
A ForecastModel instance is initialized with a model name, a number of
model parameters, and a model function. Class instances are
callable - when called as a function, their internal model function is
used. The main purpose of ForecastModel objects is to generate predicted
values for a time series, given a set of parameters. These values can be
compared to the original series to get an array of residuals::
y_predicted = model(a_x, a_date, params)
residuals = (a_y - y_predicted)
This is used in an optimization loop to obtain the optimal parameters for
the model.
The reason for using this class instead of raw model functions is that
ForecastModel supports function composition::
model_sum = fcast_model1 + fcast_model2
# fcast_model 1 and 2 are ForecastModel instances, and so is model_sum
a_y1 = fcast_model1(
a_x, a_date, params1) + fcast_model2(a_x, a_date, params2)
params = np.concatenate([params1, params2])
a_y2 = model_sum(a_x, a_date, params)
a_y1 == a_y2 # True
Forecast models can be added or multiplied, with the + and * operators.
Multiple levels of composition are supported::
model = (model1 + model2) * model3
Model composition is used to aggregate trend and seasonality model
components, among other uses.
Model functions have the following signature:
- f(a_x, a_date, params, is_mult)
- a_x : array of floats
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- params: array of floats - model parameters - the optimisation loop
updates this to fit our actual values. Each
model function uses a fixed number of parameters.
- is_mult: boolean. True if the model is being used with multiplicative
composition. Required because
some model functions (e.g. steps) have different behaviour
when added to other models than when multiplying them.
- returns an array of floats - with same length as a_x - output of the
model defined by this object's modelling function f_model and the
current set of parameters
By default, model parameters are initialized as random values between
0 and 1. It is possible to define a parameter initialization function
that picks initial values based on the original time series.
This is passed during ForecastModel creation with the argument
f_init_params. Parameter initialization is compatible with model
composition: the initialization function of each component will be used
for that component's parameters.
Parameter initialisation functions have the following signature:
- f_init_params(a_x, a_y, a_date, is_mult)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- returns an array of floats - with length equal to this object's n_params
value
By default, model parameters have no boundaries. However, it is possible
to define a boundary function for a model, that sets boundaries for each
model parameter, based on the input time series. This is passed during
ForecastModel creation with the argument f_bounds.
Boundary definition is compatible with model composition:
the boundary function of each component will be used for that component's
parameters.
Boundary functions have the following signature:
- f_bounds(a_x, a_y, a_date)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- returns a tuple of 2 arrays of floats. The first defines minimum
parameter boundaries, and the second the maximum parameter boundaries.
As an option, we can assign a list of input validation functions to a
model. These functions analyse the inputs that will be used for fitting a
model, returning True if valid, and False otherwise. The forecast logic
will skip a model from fitting if any of the validation functions for that
model returns False.
Input validation functions have the following signature:
- f_validate_input(a_x, a_y, a_date)
- See the description of model functions above for more details on these
parameters.
Our input time series should meet the following constraints:
- Minimum required samples depends on number of model parameters
- May include null values
- May include multiple values per sample
- A date array is only required if the model is date-aware
Class Usage::
model_x = ForecastModel(name, n_params, f_model, f_init_params,
l_f_validate_input)
# Get model name
model_name = model_x.name
# Get number of model parameters
n_params = model_x.n_params
# Get parameter initialisation function
f_init_params = model_x.f_init_params
# Get initial parameters
init_params = f_init_params(t_values, y_values)
# Get model fitting function
f_model = model_x.f_model
# Get model output
y = f_model(a_x, a_date, parameters)
The following pre-generated models are available. They are available as attributes from this module: # noqa
.. csv-table:: Forecast models
:header: "name", "params", "formula","notes"
:widths: 20, 10, 20, 40
"model_null",0, "y=0", "Does nothing.
Used to disable components (e.g. seasonality)"
"model_constant",1, "y=A", "Constant model"
"model_linear",2, "y=Ax + B", "Linear model"
"model_linear_nondec",2, "y=Ax + B", "Non decreasing linear model.
With boundaries to ensure model slope >=0"
"model_quasilinear",3, "y=A*(x^B) + C", "Quasilinear model"
"model_exp",2, "y=A * B^x", "Exponential model"
"model_decay",4, "Y = A * e^(B*(x-C)) + D", "Exponential decay model"
"model_step",2, "y=0 if x<A, y=B if x>=A", "Step model"
"model_two_steps",4, "see model_step", "2 step models.
Parameter initialization is aware of # of steps."
"model_sigmoid_step",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))
", "Sigmoid step model"
"model_sigmoid",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))", "
Sigmoid model"
"model_season_wday",7, "see desc.", "Weekday seasonality model.
Assigns a constant value to each weekday"
"model_season_wday",6, "see desc.", "6-param weekday seasonality model.
As above, with one constant set to 0."
"model_season_wday_2",2, "see desc.", "Weekend seasonality model.
Assigns a constant to each of weekday/weekend"
"model_season_month",12, "see desc.", "Month seasonality model.
Assigns a constant value to each month"
"model_season_fourier_yearly",10, "see desc", "Fourier
yearly seasonality model"
"""
def __init__(
self,
name,
n_params,
f_model,
f_init_params=None,
f_bounds=None,
l_f_validate_input=None,
l_cache_vars=None,
dict_f_cache=None,
):
"""
Create ForecastModel
:param name: Model name
:type name: basestring
:param n_params: Number of parameters for model function
:type n_params: int
:param f_model: Model function
:type f_model: function
:param f_init_params: Parameter initialisation function
:type f_init_params: function
:param f_bounds: Boundary function
:type f_bounds: function
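        :param l_f_validate_input: Input validation function, or list of such functions
        :type l_f_validate_input: function or list of functions
        :param l_cache_vars: Name(s) of cached variables required by f_model
        :type l_cache_vars: basestring or list of basestring
        :param dict_f_cache: Dictionary mapping cache variable names to functions f(a_x, a_date)
        :type dict_f_cache: dict of functions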
"""
self.name = name
self.n_params = n_params
self.f_model = f_model
if f_init_params is not None:
self.f_init_params = f_init_params
else:
# Default initial parameters: random values between 0 and 1
self.f_init_params = _get_f_init_params_default(n_params)
if f_bounds is not None:
self.f_bounds = f_bounds
else:
self.f_bounds = _get_f_bounds_default(n_params)
if l_f_validate_input is None:
self.l_f_validate_input = [_f_validate_input_default]
else:
self.l_f_validate_input = _as_list(l_f_validate_input)
if l_cache_vars is None:
self.l_cache_vars = []
else:
self.l_cache_vars = _as_list(l_cache_vars)
if dict_f_cache is None:
self.dict_f_cache = dict()
else:
self.dict_f_cache = dict_f_cache
# TODO - REMOVE THIS - ASSUME NORMALIZED INPUT
def _get_f_init_params_validated(f_init_params):
# Adds argument validation to a parameter initialisation function
def f_init_params_validated(
a_x=None, a_y=None, a_date=None, is_mult=False):
if a_x is not None and pd.isnull(a_x).any():
raise ValueError('a_x cannot have null values')
return f_init_params(a_x, a_y, a_date, is_mult)
return f_init_params_validated
# Add logic to f_init_params that validates input
self.f_init_params = _get_f_init_params_validated(self.f_init_params)
def __call__(self, a_x, a_date, params, is_mult=False, **kwargs):
# assert len(params)==self.n_params
return self.f_model(a_x, a_date, params, is_mult, **kwargs)
def __str__(self):
return self.name
def __repr__(self):
return 'ForecastModel:{}'.format(self.name)
def __add__(self, forecast_model):
# Check for nulls
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}+{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_add_2_f_models(self, forecast_model)
f_init_params = _get_f_add_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, forecast_model):
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}*{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_mult_2_f_models(self, forecast_model)
f_init_params = _get_f_mult_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __rmul__(self, other):
return self.__mul__(other)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.name == other.name
return NotImplemented
def __ne__(self, other):
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
return self.name < other.name
def validate_input(self, a_x, a_y, a_date):
try:
l_result = [f_validate_input(a_x, a_y, a_date)
for f_validate_input in self.l_f_validate_input]
except AssertionError:
return False
return True
def init_cache(self, a_x, a_date):
dict_cache_vars = dict()
for k in self.l_cache_vars:
f = _dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
# Search vars defined in internal cache function dictionary
for k in self.dict_f_cache:
f = self.dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
return dict_cache_vars
# - Null model: 0
def _f_model_null(a_x, a_date, params, is_mult=False, **kwargs):
# This model does nothing - used to disable model components
# (e.g. seasonality) when adding/multiplying multiple functions
return float(is_mult) # Returns 1 if multiplying, 0 if adding
model_null = ForecastModel('null', 0, _f_model_null)
# - Constant model: :math:`Y = A`
def _f_model_constant(a_x, a_date, params, is_mult=False, **kwargs):
[A] = params
y = np.full(len(a_x), A)
return y
def _f_init_params_constant(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
return np.nanmean(a_y) + np.random.uniform(0, 1, 1)
model_constant = ForecastModel(
'constant',
1,
_f_model_constant,
_f_init_params_constant)
# - Naive model: Y = Y(x-1)
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _f_model_naive(a_x, a_date, params, is_mult=False, df_actuals=None):
if df_actuals is None:
raise ValueError('model_naive requires a df_actuals argument')
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out = (
# This is not really intended to work with multiple values per sample
df_actuals.drop_duplicates('x')
.merge(df_out_tmp, how='outer')
.sort_values('x')
)
df_out['y'] = (
df_out.y.shift(1)
.fillna(method='ffill')
.fillna(method='bfill')
)
df_out = df_out.loc[df_out.x.isin(a_x)]
# df_out = df_out_tmp.merge(df_out, how='left')
# TODO: CHECK THAT X,DATE order is preserved
# TODO: df_out = df_out.merge(df_out_tmp, how='right')
return df_out.y.values
model_naive = ForecastModel('naive', 0, _f_model_naive)
# - Seasonal naive model
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _fillna_wday(df):
"""
In a time series, shift samples by 1 week
and fill gaps with data from same weekday
"""
def add_col_y_out(df):
df = df.assign(y_out=df.y.shift(1).fillna(method='ffill'))
return df
df_out = (
df
.assign(wday=df.date.dt.weekday)
.groupby('wday', as_index=False).apply(add_col_y_out)
.sort_values(['x'])
.reset_index(drop=True)
)
return df_out
def _f_model_snaive_wday(a_x, a_date, params, is_mult=False, df_actuals=None):
"""Naive model - takes last valid weekly sample"""
if df_actuals is None:
raise ValueError('model_snaive_wday requires a df_actuals argument')
# df_actuals_model - table with actuals samples,
# adding y_out column with naive model values
df_actuals_model = _fillna_wday(df_actuals.drop_duplicates('x'))
# df_last_week - table with naive model values from last actuals week,
# to use in extrapolation
df_last_week = (
df_actuals_model
# Fill null actual values with data from previous weeks
.assign(y=df_actuals_model.y.fillna(df_actuals_model.y_out))
.drop_duplicates('wday', keep='last')
[['wday', 'y']]
.rename(columns=dict(y='y_out'))
)
# Generate table with extrapolated samples
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out_tmp['wday'] = df_out_tmp.date.dt.weekday
df_out_extrapolated = (
df_out_tmp
.loc[~df_out_tmp.date.isin(df_actuals_model.date)]
.merge(df_last_week, how='left')
.sort_values('x')
)
# Filter actuals table - only samples in a_x, a_date
df_out_actuals_filtered = (
# df_actuals_model.loc[df_actuals_model.x.isin(a_x)]
# Using merge rather than simple filtering to account for
# dates with multiple samples
df_actuals_model.merge(df_out_tmp, how='inner')
.sort_values('x')
)
df_out = (
pd.concat(
[df_out_actuals_filtered, df_out_extrapolated],
sort=False, ignore_index=True)
)
return df_out.y_out.values
model_snaive_wday = ForecastModel('snaive_wday', 0, _f_model_snaive_wday)
# - Spike model: :math:`Y = A`, when x_min <= X < x_max
def _f_model_spike(a_x, a_date, params, is_mult=False, **kwargs):
[A, x_min, x_max] = params
if is_mult:
c = 1
else:
c = 0
y = np.concatenate((
np.full(int(x_min), c),
np.full(int(x_max - x_min), A),
np.full(len(a_x) - int(x_max), c)
))
return y
def _f_init_params_spike(a_x=None, a_y=None, a_date=None, is_mult=False):
""" params are spike height, x start, x end """
# if not a_y.any():
if a_y is None:
        # Generic defaults: spike height 1, starting at a random x in (0, 1), ending at x=2
        return np.concatenate([[1.], np.random.uniform(0, 1, 1), [2.]])
else:
diffs = np.diff(a_y)
# if diffs:
if True:
diff = max(diffs)
x_start = np.argmax(diffs)
x_end = x_start + 1
return np.array([diff, x_start, x_end])
model_spike = ForecastModel('spike', 3, _f_model_spike, _f_init_params_spike)
# - Spike model for dates - dates are fixed for each model
def _f_model_spike_date(
a_x,
a_date,
params,
date_start,
date_end,
is_mult=False):
[A] = params
mask_spike = (a_date >= date_start) * (a_date < date_end)
if is_mult:
y = mask_spike * A + ~mask_spike
else:
y = mask_spike * A
return y
def _f_init_params_spike(a_x=None, a_y=None, a_date=None, is_mult=False):
""" params are spike height, x start, x end """
if a_y is None:
return np.concatenate([np.array([1]) + np.random.uniform(0, 1, 1)])
else:
diffs = np.diff(a_y)
# if diffs:
if True:
diff = max(diffs)
return np.array([diff])
# else:
# rand = np.random.randint(1, len(a_y) - 1)
# return [1]
def get_model_spike_date(date_start, date_end):
f_model = (
lambda a_x, a_date, params, is_mult=False, **kwargs:
_f_model_spike_date(a_x, a_date, params, date_start, date_end, is_mult)
)
model_spike_date = ForecastModel(
'spike_date[{},{}]'.format(
pd.to_datetime(date_start).date(),
pd.to_datetime(date_end).date()),
1,
f_model,
_f_init_params_spike)
return model_spike_date
# - Linear model: :math:`Y = A*x + B`
def _f_model_linear(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
y = A * a_x + B
return y
def _f_init_params_linear(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(low=0, high=1, size=2)
else: # TODO: Improve this
if a_x is not None:
a_x_size = np.unique(a_x).size - 1
else:
a_x_size = a_y.size - 1
A = (a_y[-1] - a_y[0]) / a_x_size
B = a_y[0]
# Uniform low= 0*m, high = 1*m
return np.array([A, B])
model_linear = ForecastModel(
'linear',
2,
_f_model_linear,
_f_init_params_linear)
def f_init_params_linear_nondec(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
params = _f_init_params_linear(a_x, a_y, a_date)
if params[0] < 0:
params[0] = 0
return params
def f_bounds_linear_nondec(a_x=None, a_y=None, a_date=None):
# first param should be between 0 and inf
return [0, -np.inf], [np.inf, np.inf]
model_linear_nondec = ForecastModel('linear_nondec', 2, _f_model_linear,
f_init_params=f_init_params_linear_nondec,
f_bounds=f_bounds_linear_nondec)
# - QuasiLinear model: :math:`Y = A t^{B} + C`
def _f_model_quasilinear(a_x, a_date, params, is_mult=False, **kwargs):
(A, B, C) = params
y = A * np.power(a_x, B) + C
return y
model_quasilinear = ForecastModel('quasilinear', 3, _f_model_quasilinear)
# - Exponential model: math:: Y = A * B^t
# TODO: Deprecate - not safe to use
def _f_model_exp(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
y = A * np.power(B, a_x)
return y
model_exp = ForecastModel('exponential', 2, _f_model_exp)
# - Exponential decay model: math:: Y = A * e^(B*x) + D
def _f_model_decay(a_x, a_date, params, is_mult=False, **kwargs):
(A, B, D) = params
y = A * np.exp(B * (a_x)) + D
return y
def _f_validate_input_decay(a_x, a_y, a_date):
assert (a_y > 0).all()
def f_init_params_decay(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.array([0, 0, 0])
A = a_y[0] - a_y[-1]
B = np.log(np.min(a_y) / np.max(a_y)) / (len(a_y) - 1)
if B > 0 or B == -np.inf:
B = -0.5
    D = a_y[-1]
    return np.array([A, B, D])
def f_bounds_decay(a_x=None, a_y=None, a_date=None):
return [-np.inf, -np.inf, -np.inf], [np.inf, 0, np.inf]
model_decay = ForecastModel('decay', 3, _f_model_decay,
f_init_params=f_init_params_decay,
f_bounds=f_bounds_decay,
l_f_validate_input=_f_validate_input_decay)
# - Step function: :math:`Y = {0, if x < A | B, if x >= A}`
# A is the time of step, and B is the step
def _f_step(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
if is_mult:
y = 1 + (B - 1) * np.heaviside(a_x - A, 1)
else:
y = B * np.heaviside(a_x - A, 1)
return y
# TODO: Implement initialisation for multiplicative composition
def _f_init_params_step(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 2)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(1, 'diff').index[0]
b = df['diff'].iloc[a]
return np.array([a, b * 2])
# TODO: Add boundaries for X axis
model_step = ForecastModel('step', 2, _f_step, _f_init_params_step)
# - Spike model for dates - dates are fixed for each model
def _f_model_step_date(a_x, a_date, params, date_start, is_mult=False):
[A] = params
mask_step = (a_date >= date_start).astype(float)
if is_mult:
# y = mask_step*A + ~mask_step
y = mask_step * (A - 1) + 1
else:
y = mask_step * A
return y
# TODO: Implement initialisation for multiplicative composition
def _f_init_params_step_date(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(1, 'diff').index[0]
b = df['diff'].iloc[a]
return np.array([b * 2])
def get_model_step_date(date_start):
date_start = pd.to_datetime(date_start)
f_model = (
lambda a_x, a_date, params, is_mult=False, **kwargs:
_f_model_step_date(a_x, a_date, params, date_start, is_mult)
)
model_step_date = ForecastModel('step_date[{}]'.format(date_start.date()),
1, f_model, _f_init_params_step_date)
return model_step_date
# Two step functions
def _f_n_steps(n, a_x, a_date, params, is_mult=False):
if is_mult:
y = 1
else:
y = 0
for i in range(0, n + 1, 2):
A, B = params[i: i + 2]
if is_mult:
y = y * _f_step(a_x, a_date, (A, B), is_mult)
else:
y = y + _f_step(a_x, a_date, (A, B), is_mult)
return y
def _f_two_steps(a_x, a_date, params, is_mult=False, **kwargs):
return _f_n_steps(
n=2,
a_x=a_x,
a_date=a_date,
params=params,
is_mult=is_mult)
def _f_init_params_n_steps(
n=2,
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, n * 2)
else:
# max difference between consecutive values
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(n, 'diff').index[0:n].values
b = df['diff'].iloc[a].values
params = []
for i in range(0, n):
params += [a[i], b[i]]
return np.array(params)
def _f_init_params_two_steps(a_x=None, a_y=None, a_date=None, is_mult=False):
return _f_init_params_n_steps(
n=2,
a_x=a_x,
a_y=a_y,
a_date=a_date,
is_mult=is_mult)
model_two_steps = ForecastModel(
'two_steps',
2 * 2,
_f_two_steps,
_f_init_params_two_steps)
# - Sigmoid step function: `Y = {A + (B - A) / (1 + np.exp(- D * (a_x - C)))}`
# Spans from A to B, C is the position of the step in x axis
# and D is how steep the increase is
def _f_sigmoid(a_x, a_date, params, is_mult=False, **kwargs):
(B, C, D) = params
if is_mult:
A = 1
else:
A = 0
# TODO check if a_x is negative
y = A + (B - A) / (1 + np.exp(- D * (a_x - C)))
return y
def _f_init_params_sigmoid_step(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 3)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'y': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
c = df.nlargest(1, 'diff').index[0]
b = df.loc[c, 'y']
d = b * b
return b, c, d
def _f_init_bounds_sigmoid_step(a_x=None, a_y=None, a_date=None):
if a_y is None:
return [-np.inf, -np.inf, 0.], 3 * [np.inf]
if a_y.ndim > 1:
a_y = a_y[:, 0]
if a_x.ndim > 1:
a_x = a_x[:, 0]
diff = max(a_y) - min(a_y)
b_min = -2 * diff
b_max = 2 * diff
c_min = min(a_x)
c_max = max(a_x)
d_min = 0.
d_max = np.inf
return [b_min, c_min, d_min], [b_max, c_max, d_max]
# In this model, parameter initialization is aware of number of steps
model_sigmoid_step = ForecastModel(
'sigmoid_step',
3,
_f_sigmoid,
_f_init_params_sigmoid_step,
f_bounds=_f_init_bounds_sigmoid_step)
model_sigmoid = ForecastModel('sigmoid', 3, _f_sigmoid)
# Ramp functions - used for piecewise linear models
# example : model_linear_pw2 = model_linear + model_ramp
# example 2: model_linear_p23 = model_linear + model_ramp + model_ramp
# - Ramp function: :math:`Y = {0, if x < A | B*(x-A), if x >= A}`
# A is the onset time of the ramp, and B is the slope
def _f_ramp(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
if is_mult:
y = 1 + (a_x - A) * (B) * np.heaviside(a_x - A, 1)
else:
y = (a_x - A) * B * np.heaviside(a_x - A, 1)
return y
def _f_init_params_ramp(a_x=None, a_y=None, a_date=None, is_mult=False):
# TODO: set boundaries: a_x (0.2, 0.8)
if a_y is None:
if a_x is not None:
nfirst_last = int(np.ceil(0.15 * a_x.size))
a = np.random.uniform(a_x[nfirst_last], a_x[-nfirst_last - 1], 1)
else:
a = np.random.uniform(0, 1, 1)
b = np.random.uniform(0, 1, 1)
return np.concatenate([a,
b])
else:
# TODO: FILTER A_Y BY 20-80 PERCENTILE IN A_X
df = pd.DataFrame({'b': a_y})
if a_x is not None:
#
df['x'] = a_x
# Required because we support input with multiple samples per x
# value
df = df.drop_duplicates('x')
df = df.set_index('x')
# max difference between consecutive values -- this assumes no null
# values in series
df['diff2'] = df.diff().diff().abs()
# We ignore the last 15% of the time series
skip_samples = int(np.ceil(df.index.size * 0.15))
a = (df.head(-skip_samples).tail(
-skip_samples).nlargest(1, 'diff2').index[0]
)
b = df['diff2'].loc[a]
# TODO: replace b with estimation of slope in segment 2
# minus slope in segment 1 - see init_params_linear
return np.array([a, b])
def _f_init_bounds_ramp(a_x=None, a_y=None, a_date=None):
if a_x is None:
a_min = -np.inf
a_max = np.inf
else:
# a_min = np.min(a_x)
nfirst_last = int(np.ceil(0.15 * a_x.size))
a_min = a_x[nfirst_last]
a_max = a_x[-nfirst_last]
# a_min = np.percentile(a_x, 15)
# a_max = np.percentile(a_x,85)
if a_y is None:
b_min = -np.inf
b_max = np.inf
else:
# TODO: FILTER A_Y BY 20-80 PERCENTILE IN A_X
# df = pd.DataFrame({'b': a_y})
# #max_diff2 = np.max(df.diff().diff().abs())
# max_diff2 = np.max(np.abs(np.diff(np.diff(a_y))))
#
# b_min = -2*max_diff2
# b_max = 2*max_diff2
b_min = -np.inf
b_max = np.inf
# logger_info('DEBUG: BOUNDS:',(a_min, b_min,a_max, b_max))
return ([a_min, b_min], [a_max, b_max])
model_ramp = ForecastModel(
'ramp',
2,
_f_ramp,
_f_init_params_ramp,
_f_init_bounds_ramp)
# - Weekday seasonality
def _f_model_season_wday(
a_x, a_date, params, is_mult=False,
# cache variables
a_weekday=None,
**kwargs):
# Weekday seasonality model, 6 params
# params_long[0] is default series value,
params_long = np.concatenate([[float(is_mult)], params])
if a_weekday is None:
a_weekday = _f_init_cache_a_weekday(a_x, a_date)
return params_long[a_weekday]
def _f_validate_input_season_wday(a_x, a_y, a_date):
assert a_date is not None
assert a_date.weekday.drop_duplicates().size == 7
model_season_wday = ForecastModel(
'season_wday',
6,
_f_model_season_wday,
l_f_validate_input=_f_validate_input_season_wday,
l_cache_vars=['a_weekday']
)
# - Month seasonality
def _f_init_params_season_month(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None or a_date is None:
return np.random.uniform(low=-1, high=1, size=11)
else: # TODO: Improve this
l_params_long = [np.mean(a_y[a_date.month == i])
for i in np.arange(1, 13)]
l_baseline = l_params_long[-1]
l_params = l_params_long[:-1]
if not is_mult:
l_params_add = l_params - l_baseline
return l_params_add
else:
l_params_mult = l_params / l_baseline
return l_params_mult
def _f_model_season_month(
a_x, a_date, params, is_mult=False,
# cache variables
a_month=None,
**kwargs):
# Month of December is taken as default level, has no parameter
# params_long[0] is default series value
params_long = np.concatenate([[float(is_mult)], params])
if a_month is None:
a_month = _f_init_cache_a_month(a_x, a_date)
return params_long[a_month]
model_season_month = ForecastModel(
'season_month',
11,
_f_model_season_month,
_f_init_params_season_month,
l_cache_vars=['a_month']
)
model_season_month_old = ForecastModel(
'season_month_old', 11, _f_model_season_month)
def _f_model_yearly_season_fourier(
a_x,
a_date,
params,
is_mult=False,
# cache params
a_t_fourier=None,
**kwargs):
if a_t_fourier is None:
a_t_fourier = _f_init_cache_a_t_fourier(None, a_date)
y = np.matmul(params, a_t_fourier)
return y
def _f_init_params_fourier_n_params(
n_params,
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
params = np.random.uniform(0.001, 1, n_params)
else:
# max difference in time series
diff = a_y.max() - a_y.min()
params = diff * np.random.uniform(0.001, 1, n_params)
return params
def _f_init_params_fourier(a_x=None, a_y=None, a_date=None, is_mult=False):
n_params = 2 * _dict_fourier_config['harmonics']
return _f_init_params_fourier_n_params(
n_params, a_x=a_x, a_y=a_y, a_date=a_date, is_mult=is_mult)
def _f_init_bounds_fourier_nparams(n_params, a_x=None, a_y=None, a_date=None):
return n_params * [-np.inf], n_params * [np.inf]
def _f_init_bounds_fourier_yearly(a_x=None, a_y=None, a_date=None):
n_params = 2 * _dict_fourier_config['harmonics']
return _f_init_bounds_fourier_nparams(n_params, a_x, a_y, a_date)
model_season_fourier_yearly = ForecastModel(
name='season_fourier_yearly',
n_params=2 * _dict_fourier_config.get('harmonics'),
f_model=_f_model_yearly_season_fourier,
f_init_params=_f_init_params_fourier,
f_bounds=_f_init_bounds_fourier_yearly,
l_cache_vars='a_t_fourier'
)
def get_fixed_model(forecast_model, params_fixed, is_mult=False):
# Generate model with some fixed parameters
if forecast_model.n_params == 0: # Nothing to do
return forecast_model
if len(params_fixed) != forecast_model.n_params:
err = 'Wrong number of fixed parameters'
raise ValueError(err)
return ForecastModel(
forecast_model.name + '_fixed', 0,
f_model=lambda a_x, a_date, params, is_mult=is_mult, **kwargs:
forecast_model.f_model(
a_x=a_x, a_date=a_date, params=params_fixed, is_mult=is_mult))
def get_iqr_thresholds(s_diff, low=0.25, high=0.75):
# Get thresholds based on inter quantile range
q1 = s_diff.quantile(low)
q3 = s_diff.quantile(high)
iqr = q3 - q1
thr_low = q1 - 1.5 * iqr
thr_hi = q3 + 1.5 * iqr
return thr_low, thr_hi
# TODO: Add option - estimate_outl_size
# TODO: Add option - sigmoid steps
# TODO: ADD option - gaussian spikes
def get_model_outliers(df, window=3):
"""
Identify outlier samples in a time series
:param df: Input time series
:type df: pandas.DataFrame
:param window: The x-axis window to aggregate multiple steps/spikes
:type window: int
:return:
| tuple (mask_step, mask_spike)
| mask_step: True if sample contains a step
| mask_spike: True if sample contains a spike
:rtype: tuple of 2 numpy arrays of booleans
TODO: require minimum number of samples to find an outlier
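    Usage sketch (illustrative input data)::
        df = pd.DataFrame({'x': np.arange(100), 'y': np.random.normal(size=100)})
        mask_step, mask_spike = get_model_outliers(df)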
"""
dfo = df.copy() # dfo - df for outliers
# If df has datetime index, use date logic in steps/spikes
with_dates = 'date' in df.columns
x_col = 'date' if with_dates else 'x'
if df[x_col].duplicated().any():
raise ValueError('Input cannot have multiple values per sample')
# Get the differences
dfo['dif'] = dfo.y.diff()
# We consider as outliers the values that are
# 1.5 * IQR (interquartile range) beyond the quartiles.
# These thresholds are obtained here
thr_low, thr_hi = get_iqr_thresholds(dfo.dif)
# Now identify the changes
dfo['ischange'] = ((dfo.dif < thr_low) | (dfo.dif > thr_hi)).astype(int)
# Whenever there are two or more consecutive changes
# (that is, within `window` samples), we group them together
dfo['ischange_group'] = (
dfo.ischange.rolling(window, win_type=None, center=True).max().fillna(
0).astype(int)
)
# We now have to calculate the difference within the
# same group in order to identify if the consecutive changes
# result in a step, a spike, or both.
# We get the filtered difference
dfo['dif_filt'] = (dfo.dif * dfo.ischange).fillna(0)
# And the absolute value of that
dfo['dif_filt_abs'] = dfo.dif_filt.abs()
dfo['change_group'] = dfo.ischange_group.diff(
).abs().fillna(0).astype(int).cumsum()
# this gets us the average difference of the outliers within each change
# group
df_mean_gdiff = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group')[
'dif_filt'].mean().rename('mean_group_diff').reset_index())
# this gets us the average absolute difference of the outliers within each
# change group
df_mean_gdiff_abs = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group')[
'dif_filt_abs'].mean().rename(
'mean_group_diff_abs').reset_index()
)
# Merge the differences with the original dfo
dfo = dfo.merge(
df_mean_gdiff,
how='left').merge(
df_mean_gdiff_abs,
how='left')
# Fill missing values with zero -> no change
dfo.mean_group_diff = dfo.mean_group_diff.fillna(0)
dfo.mean_group_diff_abs = dfo.mean_group_diff_abs.fillna(0)
# the change group is a step if the mean_group_diff exceeds the thresholds
dfo['is_step'] = dfo['ischange_group'] & (
((dfo.mean_group_diff < thr_low) | (dfo.mean_group_diff > thr_hi)))
# the change group is a spike if the difference between the
# mean_group_diff_abs and the average mean_group_diff exceeds
# the average threshold value
dfo['is_spike'] = (dfo.mean_group_diff_abs -
dfo.mean_group_diff.abs()) > (thr_hi - thr_low) / 2
# Get the outlier start and end points for each group
df_outl = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group').apply(
lambda x: pd.Series(
{'outl_start': x[x_col].iloc[0],
'outl_end': x[x_col].iloc[-1]})).reset_index()
)
if df_outl.empty: # No outliers - nothing to do
return np.full(dfo.index.size, False), np.full(dfo.index.size, False)
dfo = dfo.merge(df_outl, how='left')
# Get the start and end points in dfo
if with_dates:
# Convert to datetime, if we are using dates
dfo['outl_start'] = pd.to_datetime(dfo.outl_start)
dfo['outl_end'] = pd.to_datetime(dfo.outl_end)
# Create the mask for spikes and steps
dfo['mask_spike'] = (dfo['is_spike'] &
(dfo.date >= pd.to_datetime(dfo.outl_start)) &
(dfo.date < pd.to_datetime(dfo.outl_end)))
dfo['mask_step'] = (dfo['is_step'] &
(dfo.date >= pd.to_datetime(dfo.outl_start)) &
(dfo.date <= pd.to_datetime(dfo.outl_end)))
else:
# For non-date x values, we fill na's and convert to int
dfo['outl_start'] = dfo.outl_start.fillna(0).astype(int)
dfo['outl_end'] = dfo.outl_end.fillna(0).astype(int)
# Create the mask for spikes and steps
dfo['mask_spike'] = (dfo['is_spike'] &
(dfo.x >= dfo.outl_start) &
(dfo.x < dfo.outl_end))
dfo['mask_step'] = (dfo['is_step'] &
(dfo.x >= dfo.outl_start) &
(dfo.x <= dfo.outl_end))
return dfo.mask_step.values, dfo.mask_spike.values
def create_fixed_step(diff, x):
# Generate a fixed step model
fixed_params = [x, diff]
return get_fixed_model(model_step, fixed_params)
def create_fixed_spike(diff, x, duration):
# Generate a fixed spike model
fixed_params = [diff, x, x + duration]
return get_fixed_model(model_spike, fixed_params)
def create_fixed_spike_ignored(x, duration):
# Generate a fixed spike ignored model
fixed_params = [0, x, x + duration]
return get_fixed_model(model_spike, fixed_params, is_mult=True)
# Dummy variable models
def _validate_f_dummy(f_dummy):
# Ensures that behaviour of f_dummy matches specs
# Must return array of floats, same length as a_x, with values either 0.
# or 1.
def validate_for_dummy(a_dummy):
assert isinstance(a_dummy, np.ndarray)
assert np.setdiff1d(a_dummy, np.array([0., 1.])).size == 0
# validate_for_dummy(f_dummy(np.arange(0, 10), None)) # Crashes with
# f_dummy 's that require dates
validate_for_dummy(
f_dummy(
np.arange(
0, 10), pd.date_range(
'2018-01-01', '2018-01-10')))
def _get_f_dummy(dummy):
"""
Get a function that generates a mask array from a dummy variable
:param dummy: dummy variable, that can be used to generate a mask array
:type dummy: function, pandas Holiday/Calendar,
or list-like of numerics or dates
:return: model function based on dummy variable, to use on a ForecastModel
:rtype: function
"""
if callable(dummy): # If dummy is a function, use it
f_dummy = dummy
elif isinstance(dummy, Holiday):
f_dummy = _get_f_dummy_from_holiday(dummy)
elif isinstance(dummy, AbstractHolidayCalendar):
f_dummy = _get_f_dummy_from_calendar(dummy)
else:
# If dummy is a list, convert to function
f_dummy = _get_f_dummy_from_list(dummy)
return f_dummy
def _get_f_dummy_from_list(list_check):
"""
Generate a f_dummy function that defines a dummy variable, can be used
for dummy models
:param list_check: Input list
:type list_check: list-like of numerics or datetime-likes
:return: f_dummy
:rtype: function
"""
# Generate a f_dummy function that defines a dummy variable, can be used
# for dummy models
s_check = pd.Series(list_check)
assert s_check.size, 'Input list cannot be empty'
if pd.api.types.is_numeric_dtype(s_check):
list_check_numeric = s_check
def f_dummy_list_numeric(a_x, a_date):
# return a_x in check_numeric
return np.isin(a_x, list_check_numeric).astype(float)
return f_dummy_list_numeric
else:
try:
list_check_date = pd.to_datetime(s_check)
def f_dummy_list_date(a_x, a_date):
# return a_x in check_numeric
return np.isin(a_date, list_check_date).astype(float)
return f_dummy_list_date
except BaseException:
raise ValueError(
'list_dummy must be a list-like with numeric or'
'date-like values: %s', list_check)
def _get_f_dummy_from_calendar(calendar):
# Generate dummy model function from a pandas HolidayCalendar
def f_dummy_calendar(a_x, a_date, **kwargs):
# TODO: If we can pass dict_cal as an argument,
# use pre-loaded list of dates for performance
# TODO: If we can guarantee sorted dates,
# change this to a_date[0], a_date[-1] for performance
list_check_date = calendar.holidays(a_date.min(), a_date.max())
return np.isin(a_date, list_check_date).astype(float)
return f_dummy_calendar
def _get_f_dummy_from_holiday(holiday):
def f_dummy_holiday(a_x, a_date, **kwargs):
# TODO: If we can pass dict_cal as an argument,
# use pre-loaded list of dates for performance
# if dict_cal in kwargs.keys():
# list_check_date = dict_cal.get(holiday.name)
# else:
# TODO: If we can guarantee sorted dates,
# change this to a_date[0], a_date[-1] for performance
list_check_date = holiday.dates(a_date.min(), a_date.max())
return np.isin(a_date, list_check_date).astype(float)
return f_dummy_holiday
def _get_f_model_dummy(f_dummy, mask_name):
"""
Generate a model function for a dummy variable defined by f_dummy
:param dummy: dummy variable, that can be used to generate a mask array
:type dummy: function, pandas Holiday/Calendar,
or list-like of numerics or dates
:return: model function based on dummy variable, to use on a ForecastModel
:rtype: function
"""
def f_model_check(a_x, a_date, params, is_mult=False, **kwargs):
# Uses internal f_check to assign 0 or 1 to each sample
# If f_dummy(x)==1, return A
# If f_dummy(x)==0, return 0 (or 1 if is_mult)
a_mask = kwargs.get(mask_name)
if a_mask is None:
a_mask = f_dummy(a_x, a_date)
[A] = params
if not is_mult:
a_result = A * a_mask
else:
a_result = (A) * a_mask + 1
return a_result
return f_model_check
def get_model_dummy(name, dummy, **kwargs):
"""
Generate a model based on a dummy variable.
:param name: Name of the model
:type name: basestring
:param dummy:
| Can be a function or a list-like.
| If a function, it must be of the form f_dummy(a_x, a_date),
| and return a numpy array of floats
| with the same length as a_x and values that are either 0 or 1.
| If a list-like of numerics, it will be converted to a f_dummy function
| as described above, which will have values of 1 when a_x has one of
| the values in the list, and 0 otherwise. If a list-like of date-likes,
| it will be converted to a f_dummy function as described above, which
| will have values of 1 when a_date has one of the values in the list,
| and 0 otherwise.
:type dummy: function, or list-like of numerics or datetime-likes
:param kwargs:
:type kwargs:
:return:
| A model that returns A when dummy is 1, and 0 (or 1 if is_mult==True)
| otherwise.
:rtype: ForecastModel
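    Usage sketch (same style as model_season_wday_2, defined below with this
    function)::
        model_dummy_weekend = get_model_dummy(
            'dummy_weekend',
            lambda a_x, a_date, **kwargs: (a_date.weekday >= 5).astype(float))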
"""
mask_name = 'mask_' + name
f_dummy = _get_f_dummy(dummy)
_validate_f_dummy(f_dummy)
f_model_dummy = _get_f_model_dummy(f_dummy, mask_name)
dict_f_cache = {mask_name: f_dummy}
return ForecastModel(
name, 1, f_model_dummy, dict_f_cache=dict_f_cache, **kwargs)
model_season_wday_2 = get_model_dummy(
'season_wday_2', lambda a_x, a_date, **kwargs:
(a_date.weekday < 5).astype(float))
# Example dummy model - checks if it is Christmas
model_dummy_christmas = get_model_dummy(
'dummy_christmas', lambda a_x, a_date, **kwargs:
((a_date.month == 12) & (a_date.day == 25)).astype(float))
# Example dummy model - checks if it is first day of month
model_dummy_month_start = get_model_dummy(
'dummy_month_start', lambda a_x, a_date, **kwargs:
(a_date.day == 1).astype(float))
class CalendarBankHolUK(AbstractHolidayCalendar):
rules = [
GoodFriday,
EasterMonday,
# Early May Bank Holiday - first Monday in May
Holiday('Early May Bank Holiday', month=5, day=1,
offset=DateOffset(weekday=MO(1))
),
# Spring Bank Holiday - Last Monday in May
Holiday('Spring Bank Holiday', month=5, day=31,
offset=DateOffset(weekday=MO(-1))
),
# August Bank holiday - Last Monday in August
Holiday('August Bank Holiday', month=8, day=30,
offset=DateOffset(weekday=MO(-1))
)
]
class CalendarChristmasUK(AbstractHolidayCalendar):
rules = [
Holiday('New Year\'s Day', month=1, day=1, observance=next_monday),
Holiday('Christmas', month=12, day=25, observance=next_monday),
Holiday('Boxing Day', month=12, day=26,
observance=next_monday_or_tuesday),
]
# Bank Holidays for Italy
class CalendarBankHolIta(AbstractHolidayCalendar):
rules = [
EasterMonday,
Holiday('Festa della Liberazione', month=4, day=25),
Holiday('Festa del lavoro', month=5, day=1),
Holiday('Festa della Repubblica', month=6, day=2),
Holiday('Ferragosto', month=8, day=15),
Holiday('Tutti i Santi', month=11, day=1),
Holiday('Immacolata Concezione', month=12, day=8),
]
class CalendarChristmasIta(AbstractHolidayCalendar):
rules = [
Holiday('New Year\'s Day', month=1, day=1, observance=next_monday),
Holiday('Christmas', month=12, day=25, observance=next_monday),
Holiday('Santo Stefano', month=12, day=26,
observance=next_monday_or_tuesday),
Holiday('Epiphany', month=1, day=6, observance=next_monday),
]
def get_model_from_calendars(l_calendar, name=None):
"""
Create a ForecastModel based on a list of pandas Calendars.
    :param l_calendar: Calendar, or list of calendars, with holiday rules
    :type l_calendar: pandas.tseries.holiday.AbstractHolidayCalendar, or list of them
    :param name: Name of the resulting model. Defaults to the first calendar's name.
    :type name: basestring
:return: model based on the input calendar
:rtype: ForecastModel
In pandas, Holidays and calendars provide a simple way to define
holiday rules, to be used in any analysis that requires a predefined
set of holidays. This function converts a Calendar object into a
ForecastModel that assigns a parameter to each calendar rule.
As an example, a Calendar with 1 rule defining Christmas dates
generates a model with a single parameter, which
determines the amount added/multiplied to samples falling on Christmas.
A calendar with 2 rules for Christmas and New Year will have two parameters
- the first one applying to samples in Christmas, and the second
one applying to samples in New Year.
Usage::
from pandas.tseries.holiday import USFederalHolidayCalendar
        model_calendar = get_model_from_calendars(USFederalHolidayCalendar())
"""
if isinstance(l_calendar, AbstractHolidayCalendar):
l_calendar = [l_calendar]
# Filter out calendars without rules
l_calendar = [calendar for calendar in l_calendar if calendar.rules]
assert len(l_calendar), 'Need 1+ valid calendars'
if name is None:
name = l_calendar[0].name
l_model_dummy = [get_model_dummy(calendar.name, calendar)
for calendar in l_calendar]
f_model_prod = np.prod(l_model_dummy)
f_model_sum = np.sum(l_model_dummy)
def _f_init_params_calendar(
a_x=None, a_y=None, a_date=None, is_mult=False):
if is_mult:
return np.ones(len(l_model_dummy))
else:
return np.zeros(len(l_model_dummy))
def _f_model_calendar(a_x, a_date, params, is_mult=False, **kwargs):
f_all_dummies = f_model_prod if is_mult else f_model_sum
return f_all_dummies(a_x, a_date, params, is_mult, **kwargs)
model_calendar = ForecastModel(
name,
len(l_model_dummy),
_f_model_calendar,
_f_init_params_calendar,
l_cache_vars=f_model_sum.l_cache_vars,
dict_f_cache=f_model_sum.dict_f_cache
)
return model_calendar
model_calendar_uk = get_model_from_calendars(
[CalendarChristmasUK(), CalendarBankHolUK()], 'calendar_uk')
model_calendar_us = get_model_from_calendars(USFederalHolidayCalendar(),
'calendar_us')
# Calendar for Italy
model_calendar_ita = get_model_from_calendars(
    [CalendarChristmasIta(), CalendarBankHolIta()], 'calendar_ita')
def get_model_from_datelist(name=None, *args):
"""
Create a ForecastModel based on one or more lists of dates.
:param name: Model name
:type name: str
:param args: Each element in args is a list of dates.
:type args:
:return: model based on the input lists of dates
:rtype: ForecastModel
Usage::
        model_datelist1 = get_model_from_datelist('datelist1',
            [date1, date2, date3])
        model_datelists23 = get_model_from_datelist('datelists23',
            [date1, date2], [date3, date4])
In the example above, model_datelist1 will have one parameter, which
determines the amount added/multiplied to samples with dates matching
either date1, date2 or date3. model_datelists23 will have two parameters
- the first one applying to samples in date1 and date2, and the second
    one applying to samples in date3 and date4.
"""
l_model_dummy = [get_model_dummy('model_dummy', pd.to_datetime(l_date))
for l_date in args]
assert (len(l_model_dummy)), 'Need 1+ lists of dates'
f_model_prod = np.prod(l_model_dummy)
f_model_sum = np.sum(l_model_dummy)
def _f_init_params_date_list(
a_x=None, a_y=None, a_date=None, is_mult=False):
if is_mult:
return np.ones(len(l_model_dummy))
else:
return np.zeros(len(l_model_dummy))
def _f_model_date_list(a_x, a_date, params, is_mult=False, **kwargs):
f_all_dummies = f_model_prod if is_mult else f_model_sum
return f_all_dummies(a_x, a_date, params, is_mult, **kwargs)
model_date_list = ForecastModel(
name,
len(l_model_dummy),
_f_model_date_list,
_f_init_params_date_list
)
return model_date_list
# Utility functions
def fix_params_fmodel(forecast_model, l_params_fixed):
"""
Given a forecast model and a list of floats, modify the model so that some
of its parameters become fixed
:param forecast_model: Input model
:type forecast_model: ForecastModel
:param l_params_fixed: List of floats with same length as number of
parameters in model. For each element, a non-null value means
that the parameter in that position is fixed to that value.
A null value means that the parameter in that position is not fixed.
:type l_params_fixed: list
:return: A forecast model with a number of parameters equal to the number
of null values in l_params_fixed, with f_model modified so that some
of its parameters gain fixed values equal to the non-null values
        in l_params_fixed
:rtype: ForecastModel
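    Usage sketch - fix the slope of model_linear and leave the intercept free::
        model_linear_slope1 = fix_params_fmodel(model_linear, [1.0, np.NaN])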
"""
assert len(l_params_fixed) == forecast_model.n_params
l_params_fixed = np.array(l_params_fixed)
a_null = np.isnan(l_params_fixed)
i_null = np.nonzero(a_null)
name = '{}_fixed_{}'.format(
forecast_model.name,
str(l_params_fixed).replace(
'nan',
':'))
n_params = len(i_null[0])
def f_model_fixed(a_x, a_date, params, is_mult=False, **kwargs):
params_long = l_params_fixed
params_long[i_null] = params
return forecast_model.f_model(a_x, a_date, params_long, is_mult)
def f_init_params_fixed(a_x=None, a_y=None, a_date=None, is_mult=False):
# return params short
params_init = forecast_model.f_init_params(a_x, a_y, a_date, is_mult)
params_init_short = np.array(params_init)[i_null]
return params_init_short
def f_bounds_fixed(a_x=None, a_y=None, a_date=None):
# return f_bounds short
bounds_min, bounds_max = forecast_model.f_bounds(a_x, a_y, a_date)
bounds_min_short = np.array(bounds_min)[i_null]
bounds_max_short = np.array(bounds_max)[i_null]
return bounds_min_short, bounds_max_short
model_result = ForecastModel(
name,
n_params,
f_model_fixed,
f_init_params_fixed,
f_bounds_fixed)
return model_result
def simplify_model(f_model, a_x=None, a_y=None, a_date=None):
"""
Check a model's bounds, and update model to make parameters fixed if their
min and max bounds are equal
:param f_model: Input model
:type f_model: ForecastModel
:param a_x: X axis for model function.
:type a_x: numpy array of floats
:param a_y: Input time series values, to compare to the model function
:type a_y: numpy array of floats
:param a_date: Dates for the input time series
:type a_date: numpy array of datetimes
:return: Model with simplified parameters based on bounds
:rtype: ForecastModel
"""
bounds_min, bounds_max = f_model.f_bounds(a_x, a_y, a_date)
bounds_diff = np.array(bounds_max) - np.array(bounds_min)
i_diff_zero = np.nonzero(bounds_diff == 0)
# For any parameter, if bounds_min == bounds_max, that parameter becomes
# fixed
if i_diff_zero[0].size == 0:
return f_model
else: # We make parameters fixed if their min and max bounds are equal
params_fixed = np.full(f_model.n_params, np.NaN)
        params_fixed[i_diff_zero] = np.array(bounds_max)[i_diff_zero]
f_model = fix_params_fmodel(f_model, params_fixed)
logger.info(
'Some min and max bounds are equal - generating fixed model: %s',
f_model.name)
return f_model
def validate_initial_guess(initial_guess, bounds):
# Check that initial parameter values fall within model bounds
initial_guess = np.array(initial_guess)
bounds_min, bounds_max = bounds
return np.all(
(initial_guess >= bounds_min) & (
initial_guess <= bounds_max))
def get_l_model_auto_season(a_date, min_periods=1.5, season_add_mult='add',
l_season_yearly=None, l_season_weekly=None):
"""
    Generates a list of candidate seasonality models for a series of
    timestamps
:param a_date: date array of a time series
:type a_date: numpy array of timestamps
:param min_periods: Minimum number of periods required to apply
seasonality
:type min_periods: float
    :param season_add_mult: 'add' or 'mult'
    :type season_add_mult: basestring
:return: list of candidate seasonality models
:rtype: list of ForecastModel
"""
s_date = pd.Series(a_date).sort_values().drop_duplicates()
min_date_delta = s_date.diff().min()
max_date_delta = s_date.max() - s_date.min()
if pd.isna(min_date_delta) or pd.isna(max_date_delta):
return [model_null]
use_season_yearly = (
# Need more than a full year
        (max_date_delta > pd.Timedelta(min_periods * 365, unit='d')
import pandas as pd
import numpy as np
from churn_const import out_col, no_plot, save_path, schema_data_dict, skip_metrics,key_cols,max_clips,min_valid
def data_load(schema):
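    """
    Load the churn data set for a schema, applying metric clipping and minimum-value filters.
    :param schema: Name of the schema/data set to load (key of schema_data_dict)
    :return: Pandas data frame indexed by key_cols, with invalid rows removed
    """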
data_file = schema_data_dict[schema]
schema_save_path = save_path(schema) + data_file
churn_data = pd.read_csv(schema_save_path + '.csv')
if data_file in skip_metrics:
churn_data.drop(skip_metrics[data_file], axis=1,inplace=True)
churn_data.set_index(key_cols,inplace=True)
for metric in max_clips.keys():
if metric in churn_data.columns.values:
churn_data[metric].clip(upper=max_clips[metric], inplace=True)
print('%s size before validation of columns: %d' % (data_file, churn_data.shape[0]))
for metric in min_valid.keys():
if metric.lower() in churn_data.columns.values:
churn_data=churn_data[churn_data[metric.lower()]>min_valid[metric]]
print('Loaded %s, size=%dx%d with columns:' % (data_file, churn_data.shape[0], churn_data.shape[1]))
return churn_data
def behavioral_cohort_plot_data(churn_data, var_to_plot,nbin=10,out_col=out_col):
"""
    Make a data frame with two columns prepared to be the plot points for a behavioral cohort plot.
    The data is binned into `nbin` ordered bins, and the mean value of the metric and the churn rate
    are calculated for each bin.
    :param churn_data: Pandas data frame containing the data set
    :param var_to_plot: The variable to plot
    :param nbin: Number of bins to cut the metric into (default 10)
    :param out_col: Name of the outcome column holding the churn indicator
    :return: Data frame with one row per bin, holding the mean metric value and the churn rate
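    Example (illustrative metric name)::
        plot_data = behavioral_cohort_plot_data(churn_data, 'posts_per_month')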
"""
groups = pd.qcut(churn_data[var_to_plot], nbin, duplicates='drop')
midpoints = churn_data.groupby(groups)[var_to_plot].mean()
churns = churn_data.groupby(groups)[out_col].mean()
    plot_frame = pd.DataFrame({var_to_plot: midpoints.values, 'churn_rate': churns})
#!/usr/bin/env python
"""
CreateNetwork: Creates a TF-TF gene regulation network from annotated transcription factor binding sites
@author: <NAME>
@contact: mette.bentsen (at) mpi-bn.mpg.de
@license: MIT
"""
import os
import sys
import argparse
import pyBigWig
import numpy as np
import glob
#import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
#Utils from TOBIAS
from tobias.utils.utilities import *
from tobias.utils.logger import *
#--------------------------------------------------------------------------------#
def add_network_arguments(parser):
parser.formatter_class = lambda prog: argparse.RawDescriptionHelpFormatter(prog, max_help_position=40, width=90)
description = "Creates a TF-TF gene regulation network from annotated transcription factor binding sites"
parser.description = format_help_description("CreateNetwork", description)
parser._action_groups.pop() #pop -h
#Required arguments
required = parser.add_argument_group('Required arguments')
required.add_argument('--TFBS', metavar="", help="File(s) containing TFBS (with annotation) to create network from", nargs="*")
required.add_argument('--origin', metavar="", help="File containing mapping of TF <-> origin gene")
#Optional arguments
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('--start', metavar="", help="Name of node to start in (default: all nodes)")
optional.add_argument('--max-len', metavar="", help="Maximum number of nodes in paths through graph (default: 4)", type=int, default=4)
#optional.add_argument('--unique', action='store_true', help="Only include edges once (default: edges can occur multiple times in case of multiple binding sites)")
runargs = parser.add_argument_group("Run arguments")
runargs.add_argument('--outdir', metavar="", help="Path to output directory (default: tobias_network)", default="tobias_network")
runargs = add_logger_args(runargs)
return(parser)
#--------------------------------------------------------------------------------#
def dfs(adjacency, path, all_paths=None, options={"max_length": 3}):
    # Use a None default to avoid a shared mutable list accumulating paths across calls
    if all_paths is None:
        all_paths = []
last_node = path[-1]
if last_node in adjacency:
target_nodes = adjacency[last_node].get("targets", [])
if len(target_nodes) > 0:
#Go through all targets and get paths
for target_node in target_nodes:
if target_node in path: #cyclic; add node and close path
new_path = path + [target_node]
all_paths += [new_path]
else: #not cyclic; add node and search for new paths
new_path = path + [target_node]
if len(new_path) == options["max_length"]:
all_paths += [new_path] #Save current path
else:
all_paths = dfs(adjacency, new_path, all_paths, options) #Find targets of last node in path
else:
all_paths += [path]
else: #node is not in adjacency/has no targets; save current path
all_paths += [path]
return all_paths
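# Illustrative input for dfs(): adjacency maps each source TF to a dict with its target TFs, e.g.
# adjacency = {"TF1": {"targets": ["TF2", "TF3"]}, "TF2": {"targets": ["TF1"]}}
# dfs(adjacency, ["TF1"], [], {"max_length": 3}) returns every path rooted at TF1,
# stopping after 3 nodes or when a cycle is closed.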
#--------------------------------------------------------------------------------#
def run_network(args):
make_directory(args.outdir)
check_required(args, ["TFBS", "origin"])
logger = TobiasLogger("CreateNetwork", args.verbosity)
logger.begin()
#-------------------------- Origin file translating motif name -> gene origin -----------------------------------#
#translation file, where one motif can constitute more than one gene (jun::fos)
#and more genes can encode transcription factors with same motifs (close family members with same target sequence)
    origin_table = pd.read_csv(args.origin, sep="\t", header=None)
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
with raises(Exception, match="Only provide on_index or join_columns"):
compare = datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])
def test_compare_df_setter_good_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_columns_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == set()
assert compare.df2_unq_columns() == set()
assert compare.intersect_columns() == {"a", "b"}
def test_columns_no_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "d": "oh"}, {"a": 2, "b": 3, "d": "ya"}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == {"c"}
assert compare.df2_unq_columns() == {"d"}
assert compare.intersect_columns() == {"a", "b"}
def test_10k_rows():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1.copy()
df2["b"] = df2["b"] + 0.1
compare_tol = datacompy.Compare(df1, df2, ["a"], abs_tol=0.2)
assert compare_tol.matches()
assert len(compare_tol.df1_unq_rows) == 0
assert len(compare_tol.df2_unq_rows) == 0
assert compare_tol.intersect_columns() == {"a", "b", "c"}
assert compare_tol.all_columns_match()
assert compare_tol.all_rows_overlap()
assert compare_tol.intersect_rows_match()
compare_no_tol = datacompy.Compare(df1, df2, ["a"])
assert not compare_no_tol.matches()
assert len(compare_no_tol.df1_unq_rows) == 0
assert len(compare_no_tol.df2_unq_rows) == 0
assert compare_no_tol.intersect_columns() == {"a", "b", "c"}
assert compare_no_tol.all_columns_match()
assert compare_no_tol.all_rows_overlap()
assert not compare_no_tol.intersect_rows_match()
@mock.patch("datacompy.logging.debug")
def test_subset(mock_debug):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "c": "hi"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert comp.subset()
assert mock_debug.called_with("Checking equality")
@mock.patch("datacompy.logging.info")
def test_not_subset(mock_info):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "great"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.subset()
assert mock_info.called_with("Sample c mismatch: a: 2, df1: yo, df2: great")
def test_large_subset():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1[["a", "b"]].sample(50).copy()
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.matches()
assert comp.subset()
def test_string_joiner():
df1 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
df2 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
compare = datacompy.Compare(df1, df2, "ab")
assert compare.matches()
def test_decimal_with_joins():
df1 = pd.DataFrame([{"a": Decimal("1"), "b": 2}, {"a": Decimal("2"), "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_nulls():
df1 = pd.DataFrame([{"a": 1, "b": Decimal("2")}, {"a": 2, "b": Decimal("2")}])
df2 = | pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}, {"a": 3, "b": 2}]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
class Durations(object):
@classmethod
def set(cls, X, extract_cols, dataset):
print("... ... Durations")
all_df = dataset["all_df"]
# duration from first action to clickout
dffac_df = all_df[["session_id", "timestamp", "timestamp_dt"]].groupby(
"session_id").first().reset_index()
dffac_df = dffac_df[["session_id", "timestamp_dt"]]
dffac_df.columns = ["session_id", "first_timestamp_dt"]
X = pd.merge(X, dffac_df, on="session_id", how="left")
X["session_duration"] = X.apply(lambda x: (x.timestamp_dt - x.first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["session_duration"]
del dffac_df
        # duration from last destination to clickout
dflsc_df = all_df[["session_id", "_session_id", "timestamp", "timestamp_dt"]].groupby(
"_session_id").first().reset_index()
dflsc_df = dflsc_df[dflsc_df._session_id.isin(X._session_id)]
dflsc_df = dflsc_df[["session_id", "timestamp_dt"]]
dflsc_df.columns = ["session_id", "step_first_timestamp_dt"]
X = pd.merge(X, dflsc_df, on="session_id", how="left")
X["step_duration"] = X.apply(lambda x: (x.timestamp_dt - x.step_first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["step_duration"]
del dflsc_df
return (X, extract_cols)
class JustClickout(object):
@classmethod
def set(cls, X, extract_cols):
print("... ... JustClickout")
        # append current filter flags
def get_cf_features(x):
sbp = 1 if "Sort by Price" in x.current_filters else 0
sbd = 1 if "Sort By Distance" in x.current_filters else 0
sbr = 1 if "Sort By Rating" in x.current_filters else 0
fod = 1 if "Focus on Distance" in x.current_filters else 0
fsr = 1 if "Focus on Rating" in x.current_filters else 0
bev = 1 if "Best Value" in x.current_filters else 0
return pd.Series({'cf_sbp': sbp
, 'cf_sbd': sbd
, 'cf_sbr': sbr
, 'cf_fod': fod
, 'cf_fsr': fsr
, 'cf_bev': bev})
X["current_filters"] = X["current_filters"].fillna("")
curf_df = X[["current_filters"]].apply(lambda x: get_cf_features(x), axis=1)
X = pd.concat([X, curf_df], axis=1)
extract_cols = extract_cols + list(curf_df.columns)
del curf_df
return (X, extract_cols)
class JustBeforeClickout(object):
@classmethod
def set(cls, X, dataset):
print("... ... JustBeforeClickout")
all_df = dataset["all_df"]
# last action_type
lasttype_df = all_df[["session_id", "action_type", "is_y"]].copy()
lasttype_df["lat"] = lasttype_df["action_type"].shift(1)
lasttype_df["last_session_id"] = lasttype_df["session_id"].shift(1)
lasttype_df = lasttype_df[lasttype_df.is_y == 1]
lasttype_df = lasttype_df[lasttype_df.session_id == lasttype_df.last_session_id]
lasttype_df = lasttype_df[["session_id", "lat"]]
onehot_lat = pd.get_dummies(lasttype_df, columns=['lat'])
X = pd.merge(X, onehot_lat, on="session_id", how="left")
lat_cols = list(onehot_lat.columns)
lat_cols.remove("session_id")
for lat_col in lat_cols:
X[lat_col] = X[lat_col].fillna(0)
del lasttype_df
del onehot_lat
return X
class Record2Impression(object):
@classmethod
def expand(cls, X, extract_cols, dataset):
print("... ... Record2Impression")
# create expanded
X = X.reset_index()
X["gid"] = X.index
X["n_imps"] = X[["impressions"]].apply(lambda x: len(str(x.impressions).split("|")), axis=1)
X["price_mean"] = X[["prices"]].apply(lambda x: np.mean(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["price_std"] = X[["prices"]].apply(lambda x: np.std(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["impression"] = X[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
X["price"] = X[["prices"]].apply(lambda x: str(x.prices).split("|"), axis=1)
X_impression = X[["gid", "impression"]].set_index('gid').impression.apply(pd.Series).stack().reset_index(
level=0).rename(columns={0: 'impression'})
X_price = X[["gid", "price"]].set_index('gid').price.apply(pd.Series).stack().reset_index(level=0).rename(
columns={0: 'price'})
X_position = X[["gid", "impression"]].set_index('gid').impression.apply(
lambda x: pd.Series(range(len(x)))).stack().reset_index(level=0).rename(columns={0: 'position'})
X_expanded = pd.concat([X_impression, X_price], axis=1)
X_expanded = pd.concat([X_expanded, X_position], axis=1)
X_expanded.columns = ["gid", "impression", "gid2", "price", "gid3", "position"]
X_expanded = X_expanded[["gid", "impression", "price", "position"]]
        # join the expanded rows back to the per-clickout aggregates
X = pd.merge(X_expanded, X[["gid", "n_imps", "price_mean", "price_std"] + extract_cols], on="gid", how="left")
# to normalize position and price
X["pos_rate"] = X["position"] / X["n_imps"]
X["pos"] = X["position"] + 1
X["price_norm"] = (X["price"].astype(float) - X["price_mean"].astype(float)) / X["price_std"].astype(float)
# join price_norm rank
pnorm_rank_df = X[["session_id", "price_norm"]].copy()
pnorm_rank_df = pnorm_rank_df[["session_id", "price_norm"]].groupby("session_id").rank(ascending=False)
pnorm_rank_df.columns = ["price_norm_rank"]
X = pd.concat([X, pnorm_rank_df], axis=1)
del pnorm_rank_df
# calc discount rate
X["price"] = X["price"].astype(float)
prices_df = X[["impression", "price"]].groupby("impression").agg({'price': np.mean}).reset_index()
prices_df.columns = ["impression", "item_price_mean"]
X = pd.merge(X, prices_df, on="impression", how="left")
X["discount_rate"] = X["price"] / X["item_price_mean"]
del prices_df
# append some important props and other props with over 0.2 coverage
sum_item_props_df = dataset["sum_item_props_df"]
item_props = dataset["item_props"]
prop_cols = ["pGood Rating"
, "pVery Good Rating"
, "pExcellent Rating"
, "pSatisfactory Rating"
, "p1 Star"
, "p2 Star"
, "p3 Star"
, "p4 Star"
, "p5 Star"
, "pBusiness Centre"
, "pBusiness Hotel"
, "pConference Rooms"]
c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
prop_cols = prop_cols + c02over_prop_cols
prop_cols = list(set(prop_cols))
X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
X[prop_cols] = X[prop_cols].fillna(0)
return (X, extract_cols)
class DecisionMakingProcess(object):
@classmethod
def detect(cls, X, dataset):
print("... ... Decision Making Process")
print("... ... ... Attention and Perceptual Encoding")
print("... ... ... Information Acquisition and Evaluation")
all_df = dataset["all_df"]
# join pos stats"
copos_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "impressions", "is_y"]].copy()
copos_df = copos_df[copos_df.is_y == 0]
copos_df["impression"] = copos_df[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
copos_df["co_pos"] = copos_df[["impression", "reference"]].apply(
lambda x: x.impression.index(x.reference) + 1 if x.reference in x.impression else 1, axis=1)
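        # co_pos is the 1-based display position of each previously clicked-out item
        # (falls back to 1 when the reference is not in that clickout's impression list)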
copos_df_stats = copos_df[["session_id", "co_pos"]].groupby("session_id").agg(
{'co_pos': [np.min, np.max, np.mean]}).reset_index()
copos_df_stats.columns = ["session_id", "co_pos_min", "co_pos_max", "co_pos_mean"]
X = pd.merge(X, copos_df_stats, on="session_id", how="left")
X["co_pos_min"] = X["co_pos_min"].fillna(1)
X["co_pos_mean"] = X["co_pos_mean"].fillna(1)
X["co_pos_max"] = X["co_pos_max"].fillna(1)
X["co_pos_min_diff"] = X["pos"] - X["co_pos_min"]
X["co_pos_mean_diff"] = X["pos"] - X["co_pos_mean"]
X["clickouted_pos_max_diff"] = X["co_pos_max"] - X["pos"]
del copos_df
del copos_df_stats
# is_last and is_last_elapsed_time
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
lastref_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
lastref_df["is_target"] = 0
lastref_df.loc[lastref_df.is_y == 1, "is_target"] = 1
lastref_df = lastref_df[lastref_df.action_type.isin(action_types)]
lastref_df["last_session_id"] = lastref_df["session_id"].shift(1)
lastref_df["last_reference"] = lastref_df["reference"].shift(1)
lastref_df["last_timestamp"] = lastref_df["timestamp"].shift(1)
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_session_id]
lastref_df = lastref_df[lastref_df.is_target == 1][["session_id", "last_reference", "last_timestamp"]]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_reference"]] = X[["last_reference"]].fillna("-1")
X[["last_timestamp"]] = X[["last_timestamp"]].fillna(-1)
X["is_last"] = X[["impression", "last_reference"]].apply(lambda x: 1 if x.impression == x.last_reference else 0,
axis=1)
X["elapsed_time_between_is_last"] = X[["impression", "last_reference", "timestamp", "last_timestamp"]].apply(
lambda x: int(x.timestamp) - int(x.last_timestamp) if x.impression == x.last_reference else np.nan, axis=1)
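        # elapsed_time_between_is_last is only defined on the row whose impression equals
        # the last interacted reference; drop the NaNs and merge it back so every row of
        # the session carries that single value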
lastdur_df = X[["session_id", "elapsed_time_between_is_last"]].copy()
lastdur_df = lastdur_df.dropna(axis=0, how='any')
X.drop("elapsed_time_between_is_last", axis=1, inplace=True)
X = pd.merge(X, lastdur_df, on="session_id", how="left")
del lastref_df
del lastdur_df
# join is_last_last
lastref_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
lastref_df["last_last_session_id"] = lastref_df["session_id"].shift(2)
lastref_df["last_last_reference"] = lastref_df["reference"].shift(2)
lastref_df = lastref_df[lastref_df.is_y == 1]
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_last_session_id]
lastref_df = lastref_df[["session_id", "last_last_reference"]]
lastref_df = lastref_df[~lastref_df.duplicated()]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_last_reference"]] = X[["last_last_reference"]].fillna("-1")
X["is_last_last"] = X[["impression", "last_last_reference"]].apply(
lambda x: 1 if x.impression == x.last_last_reference else 0, axis=1)
del lastref_df
        # mean elapsed time until the next action, per item (note: this is future-looking information)
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
isnext_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
isnext_df["next_session_id"] = isnext_df["session_id"].shift(-1)
isnext_df["next_timestamp"] = isnext_df["timestamp"].shift(-1)
isnext_df = isnext_df[isnext_df.session_id == isnext_df.next_session_id]
isnext_df["elapsed_next"] = isnext_df["next_timestamp"] - isnext_df["timestamp"]
isnext_df = isnext_df[isnext_df.action_type.isin(action_types)]
isnext_df = isnext_df[isnext_df.is_y == 0]
isnext_gp_df = isnext_df[["reference", "elapsed_next"]].groupby("reference").agg(
{"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_gp_df
isnext_gp_df = isnext_df[isnext_df.action_type == "clickout item"][["reference", "elapsed_next"]].groupby(
"reference").agg({"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time_byco"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_df
del isnext_gp_df
# clickouted item during session
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
X = pd.merge(X, couted_df, on=["session_id", "impression"], how="left")
X["clickouted"] = X["clickouted"].fillna(0)
X["clickouted"] = X["clickouted"].astype(int)
# diff between clickouted price mean
co_price_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "prices", "impressions", "is_y"]].copy()
co_price_df = co_price_df[co_price_df.is_y == 0] # to prevent leakage
def get_price(reference, impressions, prices):
imps = str(impressions).split("|")
prs = str(prices).split("|")
if reference in imps:
return prs[imps.index(reference)]
else:
return 0
co_price_df["price"] = co_price_df.apply(lambda x: get_price(x.reference, x.impressions, x.prices), axis=1)
co_price_df["price"] = co_price_df["price"].astype(float)
co_price_df = co_price_df.groupby("session_id").agg({'price': np.mean}).reset_index()
co_price_df.columns = ["session_id", "couted_price_mean"]
X = pd.merge(X, co_price_df, on="session_id", how="left")
X["couted_price_mean"] = X["couted_price_mean"].fillna(-1)
X["clickouted_price_diff"] = X["price"].astype(float) / X["couted_price_mean"]
X.loc[X.clickouted_price_diff < 0, "clickouted_price_diff"] = 0
del co_price_df
        # flag the two items displayed above and the five items displayed below each marked item
u_cols = []
        def set_under_the_clickouted_and_islast(X, target_col, nu=5):
u_col = target_col + "_u"
X[u_col] = X["session_id"]
X.loc[X[target_col] != 1, u_col] = ""
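            # shifting the session-id marker by u rows and checking it still equals the
            # row's own session_id flags rows displayed u positions away from a marked item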
for u in [_ for _ in range(-2, nu + 1, 1) if _ != 0]:
new_col = u_col + str(u).replace("-", "p")
X[new_col] = X[u_col].shift(u)
X[new_col] = X[new_col].fillna("")
X.loc[X[new_col] == X["session_id"], new_col] = "1"
X.loc[X[new_col] != "1", new_col] = 0
X.loc[X[new_col] == "1", new_col] = 1
u_cols.append(new_col)
X.drop(u_col, axis=1, inplace=True)
set_undert_the_clickouted_and_islast(X, "clickouted", 5)
set_undert_the_clickouted_and_islast(X, "is_last", 5)
        # counts of clickouted / last-interacted flags among the neighbouring displayed positions
u_coted_cols = [col for col in u_cols if "clickouted" in col]
u_islast_col = [col for col in u_cols if "is_last" in col]
X["clickouted_sum"] = X[u_coted_cols].sum(axis=1)
X["is_last_sum"] = X[u_islast_col].sum(axis=1)
        # step_elapsed_mean: mean time between steps, a proxy for how quickly the user acts
selapsed_df = all_df[["session_id", "step", "timestamp", "timestamp_dt", "action_type", "reference"]].copy()
selapsed_df["pre_timestamp"] = selapsed_df["timestamp"].shift(1)
selapsed_df["pre_timestamp_dt"] = selapsed_df["timestamp_dt"].shift(1)
selapsed_df["pre_session_id"] = selapsed_df["session_id"].shift(1)
selapsed_df = selapsed_df[selapsed_df.session_id == selapsed_df.pre_session_id]
selapsed_df["elapsed"] = selapsed_df["timestamp"] - selapsed_df["pre_timestamp"]
selapsed_df = selapsed_df[["session_id", "elapsed"]]
selapsed_df = selapsed_df[selapsed_df.elapsed.notna()]
selapsed_df = selapsed_df[selapsed_df.elapsed > 0]
selapsed_df = selapsed_df.groupby("session_id").agg({"elapsed": np.mean}).reset_index()
selapsed_df.columns = ["session_id", "step_elapsed_mean"]
X = pd.merge(X, selapsed_df, on="session_id", how="left")
del selapsed_df
        # elapsed time between the previous action and the target clickout, over all action
        # types (related to, but broader than, elapsed_time_between_is_last)
lduration_all_df = all_df[["session_id", "action_type", "timestamp", "is_y"]].copy()
lduration_all_df["pre_timestamp"] = lduration_all_df["timestamp"].shift(1)
lduration_all_df["pre_session_id"] = lduration_all_df["session_id"].shift(1)
lduration_all_df = lduration_all_df[lduration_all_df.session_id == lduration_all_df.pre_session_id]
lduration_all_df["elapsed_time"] = lduration_all_df["timestamp"] - lduration_all_df["pre_timestamp"]
lduration_all_df = lduration_all_df[lduration_all_df.is_y == 1]
lduration_all_df = lduration_all_df[["session_id", "elapsed_time"]]
X = pd.merge(X, lduration_all_df, on="session_id", how="left")
del lduration_all_df
# first action_type
firsta_df = all_df[["session_id", "_session_id", "action_type", "is_y"]].copy()
firsta_df = firsta_df[firsta_df.is_y == 0] # to prevent leakage
firsta_df = firsta_df.groupby("_session_id").first().reset_index()
firsta_df = firsta_df.groupby("session_id").last().reset_index()
firsta_df.loc[firsta_df["action_type"] == "search for destination", "action_type"] = "fa_sfd"
firsta_df.loc[firsta_df["action_type"] == "interaction item image", "action_type"] = "fa_iii"
firsta_df.loc[firsta_df["action_type"] == "clickout item", "action_type"] = "fa_coi"
firsta_df.loc[firsta_df["action_type"] == "search for item", "action_type"] = "fa_sfi"
firsta_df.loc[firsta_df["action_type"] == "search for poi", "action_type"] = "fa_sfp"
firsta_df.loc[firsta_df["action_type"] == "change of sort order", "action_type"] = "fa_coso"
firsta_df.loc[firsta_df["action_type"] == "filter selection", "action_type"] = "fa_fis"
firsta_df.loc[firsta_df["action_type"] == "interaction item info", "action_type"] = "fa_iiinfo"
firsta_df.loc[firsta_df["action_type"] == "interaction item rating", "action_type"] = "fa_iirat"
firsta_df.loc[firsta_df["action_type"] == "interaction item deals", "action_type"] = "fa_iidea"
firsta_df = firsta_df[["session_id", "action_type"]]
firsta_df.columns = ["session_id", "at"]
onehot_firsta = pd.get_dummies(firsta_df, columns=['at'])
firsta_cols = list(onehot_firsta.columns)
firsta_cols.remove("session_id")
X = pd.merge(X, onehot_firsta, on="session_id", how="left")
for firsta_col in firsta_cols:
X[firsta_col] = X[firsta_col].fillna(0)
del firsta_df
del onehot_firsta
# price norm by item rating prop
X["r6"] = 0
X["r7"] = 0
X["r8"] = 0
X["r9"] = 0
X.loc[X["pSatisfactory Rating"] == 1, "r6"] = 6
X.loc[X["pGood Rating"] == 1, "r7"] = 7
X.loc[X["pVery Good Rating"] == 1, "r8"] = 8
X.loc[X["pExcellent Rating"] == 1, "r9"] = 9
X["rating"] = X[["r6", "r7", "r8", "r9"]].apply(
lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
X["rating"] = X["rating"].fillna(-1)
pns_df = X[["session_id", "rating", "price"]].groupby(["session_id", "rating"]).agg(
{'price': [np.mean, np.std]}).reset_index()
pns_df.columns = ["session_id", "rating", "r_price_mean", "r_price_std"]
pns_df["r_price_std"] = pns_df["r_price_std"].fillna(1)
X = pd.merge(X, pns_df, on=["session_id", "rating"], how="left")
X["r_price_norm"] = (X["price"].astype(float) - X["r_price_mean"].astype(float)) / X["r_price_std"].astype(
float)
del pns_df
# price norm by star
X["star"] = -1
X.loc[X["p1 Star"] == 1, "star"] = 1
X.loc[X["p2 Star"] == 1, "star"] = 2
X.loc[X["p3 Star"] == 1, "star"] = 3
X.loc[X["p4 Star"] == 1, "star"] = 4
X.loc[X["p5 Star"] == 1, "star"] = 5
pns_df = X[["session_id", "star", "price"]].groupby(["session_id", "star"]).agg(
{'price': [np.mean, np.std]}).reset_index()
pns_df.columns = ["session_id", "star", "s_price_mean", "s_price_std"]
pns_df["s_price_std"] = pns_df["s_price_std"].fillna(1)
X = pd.merge(X, pns_df, on=["session_id", "star"], how="left")
X["s_price_norm"] = (X["price"].astype(float) - X["s_price_mean"].astype(float)) / X["s_price_std"].astype(
float)
del pns_df
return X
class ByItem(object):
@classmethod
def set(cls, X, dataset):
print("... ... ByItem")
all_df = dataset["all_df"]
# imps score
impscore_df = dataset["impscore_df"]
item_props = dataset["item_props"]
X = pd.merge(X, impscore_df, on="impression", how="left")
X["impsocre"] = X["impsocre"].fillna(0)
# # append some important props and other props with over 0.2 coverage
# sum_item_props_df = dataset["sum_item_props_df"]
# prop_cols = ["pGood Rating"
# , "pVery Good Rating"
# , "pExcellent Rating"
# , "pSatisfactory Rating"
# , "p1 Star"
# , "p2 Star"
# , "p3 Star"
# , "p4 Star"
# , "p5 Star"
# , "pBusiness Centre"
# , "pBusiness Hotel"
# , "pConference Rooms"]
# c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
# prop_cols = prop_cols + c02over_prop_cols
# prop_cols = list(set(prop_cols))
# X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
# X[prop_cols] = X[prop_cols].fillna(0)
# append item svd n_components=10
item_props_svd = dataset["item_props_svd"]
prop_svd_cols = list(item_props_svd.columns)
prop_svd_cols.remove("item_id")
X = pd.merge(X, item_props_svd, left_on="impression", right_on="item_id", how="left")
X[prop_svd_cols] = X[prop_svd_cols].fillna(0)
# # price norm by item rating prop
# X["r6"] = 0
# X["r7"] = 0
# X["r8"] = 0
# X["r9"] = 0
# X.loc[X["pSatisfactory Rating"] == 1, "r6"] = 6
# X.loc[X["pGood Rating"] == 1, "r7"] = 7
# X.loc[X["pVery Good Rating"] == 1, "r8"] = 8
# X.loc[X["pExcellent Rating"] == 1, "r9"] = 9
# X["rating"] = X[["r6", "r7", "r8", "r9"]].apply(
# lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
# X["rating"] = X["rating"].fillna(-1)
# pns_df = X[["session_id", "rating", "price"]].groupby(["session_id", "rating"]).agg(
# {'price': [np.mean, np.std]}).reset_index()
# pns_df.columns = ["session_id", "rating", "r_price_mean", "r_price_std"]
# pns_df["r_price_std"] = pns_df["r_price_std"].fillna(1)
# X = pd.merge(X, pns_df, on=["session_id", "rating"], how="left")
# X["r_price_norm"] = (X["price"].astype(float) - X["r_price_mean"].astype(float)) / X["r_price_std"].astype(
# float)
# del pns_df
#
# # price norm by star
# X["star"] = -1
# X.loc[X["p1 Star"] == 1, "star"] = 1
# X.loc[X["p2 Star"] == 1, "star"] = 2
# X.loc[X["p3 Star"] == 1, "star"] = 3
# X.loc[X["p4 Star"] == 1, "star"] = 4
# X.loc[X["p5 Star"] == 1, "star"] = 5
# pns_df = X[["session_id", "star", "price"]].groupby(["session_id", "star"]).agg(
# {'price': [np.mean, np.std]}).reset_index()
# pns_df.columns = ["session_id", "star", "s_price_mean", "s_price_std"]
# pns_df["s_price_std"] = pns_df["s_price_std"].fillna(1)
# X = pd.merge(X, pns_df, on=["session_id", "star"], how="left")
# X["s_price_norm"] = (X["price"].astype(float) - X["s_price_mean"].astype(float)) / X["s_price_std"].astype(
# float)
# del pns_df
# item ctr
ctrbyitem_df = all_df[all_df.action_type == "clickout item"][["session_id", "reference", "is_y"]].copy()
ctrbyitem_df = ctrbyitem_df[ctrbyitem_df.is_y == 0]
ref_df = ctrbyitem_df[["reference"]].groupby(["reference"]).size().reset_index()
ref_df.columns = ["impression", "rcnt"]
ref_df["ctrbyitem"] = ref_df["rcnt"].astype(float) / ref_df.shape[0]
ref_df = ref_df[["impression", "ctrbyitem"]]
X = pd.merge(X, ref_df, on="impression", how="left")
X["ctrbyitem"] = X["ctrbyitem"].fillna(0)
del ctrbyitem_df
del ref_df
# item ctr by city
cr_tmp_df = all_df[all_df.action_type == "clickout item"].copy()
cr_tmp_df = cr_tmp_df[cr_tmp_df.is_y == 0] # to prevent leakage
city_df = cr_tmp_df[["city"]].groupby(["city"]).size().reset_index()
city_df.columns = ["city", "ccnt"]
cityref_df = cr_tmp_df[["city", "reference"]].groupby(["city", "reference"]).size().reset_index()
cityref_df.columns = ["city", "impression", "rcnt"]
cityref_df = pd.merge(cityref_df, city_df, on="city", how="left")
cityref_df["ctrbycity"] = cityref_df["rcnt"].astype(float) / cityref_df["ccnt"].astype(float)
cityref_df = cityref_df[["city", "impression", "ctrbycity"]]
X = pd.merge(X, cityref_df, on=["city", "impression"], how="left")
X["ctrbycity"] = X["ctrbycity"].fillna(0)
del cr_tmp_df
del city_df
del cityref_df
# item ctr by city rank
ctrbycity_rank_df = X[["session_id", "ctrbycity"]].copy()
ctrbycity_rank_df = ctrbycity_rank_df[["session_id", "ctrbycity"]].groupby("session_id").rank(ascending=False)
ctrbycity_rank_df.columns = ["ctrbycity_rank"]
X = pd.concat([X, ctrbycity_rank_df], axis=1)
del ctrbycity_rank_df
# bayes likelihood by item
bayes_likelihood = dataset["bayes_likelihood"]
X["rlr"] = X["impression"].astype(str) + X["last_reference"].astype(str)
def set_bayes_li(rlr):
if rlr in bayes_likelihood:
return bayes_likelihood[rlr]
return 0.0
X["bayes_li"] = X[["rlr"]].apply(lambda x: set_bayes_li(x.rlr), axis=1)
        # item-to-item co-clickout score for items clicked out during the session
v2v_counter = dataset["v2v_counter"]
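        # v2v_counter presumably maps item_id -> {other item_id: co-clickout count};
        # for each session we merge the neighbour counts of its clicked-out items into one dict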
def extract_sv2v_counter(iids):
v = {}
for iid in iids:
if iid in v2v_counter:
for s in v2v_counter[iid]:
if not s in v:
v[s] = v2v_counter[iid][s]
return v
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
sv2v_df = couted_df.groupby("session_id").apply(
lambda x: extract_sv2v_counter(list(x.impression))).reset_index()
sv2v_df.columns = ["session_id", "sv2v"]
X = pd.merge(X, sv2v_df, on="session_id", how="left")
X["sv2v"] = X["sv2v"].fillna("{}")
X["sv2v_score"] = X[["impression", "sv2v"]].apply(
lambda x: x.sv2v[x.impression] if x.impression in x.sv2v else np.nan, axis=1)
X.drop("sv2v", axis=1, inplace=True)
sv2vs_stats = X.groupby("session_id").agg({"sv2v_score": [np.mean, np.std]}).reset_index()
sv2vs_stats.columns = ["session_id", "sv2v_score_mean", "sv2v_score_std"]
X = pd.merge(X, sv2vs_stats, on="session_id", how="left")
X["sv2v_score_norm"] = X["sv2v_score"] - X["sv2v_score_mean"] / X["sv2v_score_std"]
del couted_df
del sv2v_df
del sv2vs_stats
        # flags for action_types already performed on each item during the session
couted_df = all_df[["action_type", "session_id", "reference"]].copy()
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"]
ated_cols = ["iired"
, "iifed"
, "iiied"
, "iided"
, "sfied"]
for i, action_type in enumerate(action_types):
at_df = couted_df[couted_df.action_type == action_type].copy()
at_df = at_df[["session_id", "reference"]]
at_df.columns = ["session_id", "impression"]
at_df = at_df[~at_df.duplicated()]
at_df[ated_cols[i]] = 1
X = pd.merge(X, at_df, on=["session_id", "impression"], how="left")
X[ated_cols[i]] = X[ated_cols[i]].fillna(0)
X[ated_cols[i]] = X[ated_cols[i]].astype(int)
del at_df
del couted_df
# dropout rate by each item during each session
dropout_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
dropout_df = dropout_df[dropout_df.action_type.isin(["interaction item image", "clickout item"])]
dropout_df = dropout_df[dropout_df.is_y == 0] # to prevent leakage
dropout_df.loc[dropout_df["action_type"] == "interaction item image", "iii"] = 1
dropout_df["iii"] = dropout_df["iii"].fillna(0)
dropout_df.loc[dropout_df["action_type"] == "clickout item", "cko"] = 1
dropout_df["cko"] = dropout_df["cko"].fillna(0)
def is_dropout(iii, cko):
if iii != 0 and cko != 0:
return 0
elif iii != 0 and cko == 0:
return 1
else:
return -1
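        # is_dropout: 1 = the item saw interactions but no clickout, 0 = it also got a
        # clickout, -1 = no interactions at all (intended to be excluded from the rate)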
dropout_df = dropout_df[["session_id", "reference", "iii", "cko"]].groupby(["session_id", "reference"]).apply(
lambda x: is_dropout(np.sum(x.iii), np.sum(x.cko))).reset_index()
dropout_df.columns = ["session_id", "reference", "dropout"]
        dropout_df = dropout_df[dropout_df.dropout != -1]  # keep only items that had interactions
dropout_df = dropout_df[["reference", "dropout"]].groupby("reference").apply(
lambda x: np.sum(x.dropout).astype(float) / len(x.dropout)).reset_index()
dropout_df.columns = ["impression", "dropout_rate"]
X = pd.merge(X, dropout_df, on="impression", how="left")
del dropout_df
# dropout rate by each item during all sessions
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"]
dropout_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
dropout_df = dropout_df[dropout_df.action_type.isin(action_types + ["clickout item"])]
dropout_df = dropout_df[dropout_df.is_y == 0] # to prevent leakage
dropout_df.loc[dropout_df.action_type.isin(action_types), "iii"] = 1
dropout_df["iii"] = dropout_df["iii"].fillna(0)
dropout_df.loc[dropout_df["action_type"] == "clickout item", "cko"] = 1
dropout_df["cko"] = dropout_df["cko"].fillna(0)
dropout_df = dropout_df[["session_id", "reference", "iii", "cko"]].groupby(["session_id", "reference"]).apply(
lambda x: is_dropout(np.sum(x.iii), np.sum(x.cko))).reset_index()
dropout_df.columns = ["session_id", "reference", "dropout"]
        dropout_df = dropout_df[dropout_df.dropout != -1]  # keep only items that had interactions
dropout_df = dropout_df[["reference", "dropout"]].groupby("reference").apply(
lambda x: np.sum(x.dropout).astype(float) / len(x.dropout)).reset_index()
dropout_df.columns = ["impression", "all_dropout_rate"]
X = pd.merge(X, dropout_df, on="impression", how="left")
del dropout_df
# action_type rate by each item
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
atstats_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
atstats_df = atstats_df[atstats_df.action_type.isin(action_types)]
atstats_df = atstats_df[atstats_df.is_y == 0] # to prevent leakage
atstats_df = atstats_df[["reference", "action_type"]].groupby(["reference", "action_type"]).size().reset_index()
atstats_df.columns = ["reference", "action_type", "at_cnt"]
atstats_refcnt_df = atstats_df[["reference", "at_cnt"]].groupby("reference").sum().reset_index()
atstats_refcnt_df.columns = ["reference", "rf_cnt"]
atstats_df = pd.merge(atstats_df, atstats_refcnt_df, on="reference", how="left")
atstats_df["at_rate"] = atstats_df["at_cnt"].astype(float) / atstats_df["rf_cnt"]
atstats_df = atstats_df.pivot(index='reference', columns='action_type', values='at_rate').reset_index()
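        # note: the renaming below assumes the pivoted action_type columns come out in
        # alphabetical order (clickout item, interaction item deals/image/info/rating, search for item)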
at_rate_cols = ["co_at_rate", "iid_at_rate", "iii_at_rate", "iif_at_rate", "iir_at_rate", "sfi_at_rate"]
atstats_df.columns = ["impression"] + at_rate_cols
atstats_df = atstats_df.fillna(0)
X = pd.merge(X, atstats_df, on="impression", how="left")
for at_rate_col in at_rate_cols:
X[at_rate_col] = X[at_rate_col].fillna(0)
del atstats_df
# action_type rate in-session rank by each item
at_rate_cols = ["co_at_rate"
, "iid_at_rate"
, "iii_at_rate"
, "iif_at_rate"
, "iir_at_rate"
, "sfi_at_rate"]
at_rank_cols = []
for at_rate_col in at_rate_cols:
at_rank_col = at_rate_col + "_rank"
at_rank_cols.append(at_rank_col)
at_rank_df = X[["session_id", at_rate_col]].copy()
at_rank_df = at_rank_df[["session_id", at_rate_col]].groupby("session_id").rank(ascending=False)
at_rank_df.columns = [at_rank_col]
X = pd.concat([X, at_rank_df], axis=1)
del at_rank_df
# reference_elapsed_mean and by action_type
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
relapsed_df = all_df[
["session_id", "step", "timestamp", "timestamp_dt", "action_type", "reference", "is_y"]].copy()
relapsed_df["pre_timestamp"] = relapsed_df["timestamp"].shift(1)
relapsed_df["pre_timestamp_dt"] = relapsed_df["timestamp_dt"].shift(1)
relapsed_df["pre_session_id"] = relapsed_df["session_id"].shift(1)
relapsed_df = relapsed_df[relapsed_df.session_id == relapsed_df.pre_session_id]
relapsed_df["elapsed"] = relapsed_df["timestamp"] - relapsed_df["pre_timestamp"]
relapsed_df = relapsed_df[relapsed_df.action_type.isin(action_types)]
relapsed_df = relapsed_df[relapsed_df.is_y == 0] # to prevent leakage
relapsed_df = relapsed_df[relapsed_df.elapsed.notna()]
relapsed_df = relapsed_df[relapsed_df.elapsed > 0]
r_relapsed_df = relapsed_df[["reference", "elapsed"]].groupby("reference").agg(
{"elapsed": np.mean}).reset_index()
r_relapsed_rate_cols = ["ref_elapsed_mean"]
r_relapsed_df.columns = ["impression"] + r_relapsed_rate_cols
a_relapsed_df = relapsed_df[["reference", "action_type", "elapsed"]].groupby(["reference", "action_type"]).agg(
{"elapsed": np.mean}).reset_index()
a_relapsed_df.columns = ["reference", "action_type", "at_elapsed_mean"]
a_relapsed_df = a_relapsed_df.pivot(index='reference', columns='action_type',
values='at_elapsed_mean').reset_index()
a_relapsed_rate_cols = ["co_ref_elapsed_mean", "iid_ref_elapsed_mean", "iii_ref_elapsed_mean",
"iif_ref_elapsed_mean", "iir_ref_elapsed_mean", "sfi_ref_elapsed_mean"]
a_relapsed_df.columns = ["impression"] + a_relapsed_rate_cols
X = pd.merge(X, r_relapsed_df, on="impression", how="left")
X = pd.merge(X, a_relapsed_df, on="impression", how="left")
del relapsed_df
del r_relapsed_df
del a_relapsed_df
# tsh "time split by hour" item ctr
tsh_df = all_df[all_df.action_type == "clickout item"][
["session_id", "action_type", "reference", "timestamp_dt", "is_y"]].copy()
tsh_df["tsh24"] = -1
X["tsh24"] = -1
ts_min = tsh_df["timestamp_dt"].min()
ts_max = tsh_df["timestamp_dt"].max()
def set_tscol(hours):
tscol = "tsh" + str(hours)
ts_start = ts_min
ts_end = ts_start + datetime.timedelta(hours=hours)
ts_bin = 1
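            # walk forward in fixed `hours`-wide windows from the earliest timestamp,
            # assigning an increasing bin id to rows falling inside the current window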
while True:
tsh_df.loc[(tsh_df.timestamp_dt >= ts_start) & (tsh_df.timestamp_dt < ts_end), tscol] = ts_bin
X.loc[(X.timestamp_dt >= ts_start) & (X.timestamp_dt < ts_end), tscol] = ts_bin
ts_start = ts_end
ts_end = ts_start + datetime.timedelta(hours=hours)
if ts_start > ts_max:
break
ts_bin += 1
set_tscol(24)
tsh_df = tsh_df[tsh_df.is_y == 0]
tsh24_df = tsh_df[["tsh24"]].groupby(["tsh24"]).size().reset_index()
tsh24_df.columns = ["tsh24", "allcnt"]
tsh24ref_df = tsh_df[["tsh24", "reference"]].groupby(["tsh24", "reference"]).size().reset_index()
tsh24ref_df.columns = ["tsh24", "impression", "rcnt"]
tsh24ref_df = pd.merge(tsh24ref_df, tsh24_df, on="tsh24", how="left")
tsh24ref_df["ctrbytsh24"] = tsh24ref_df["rcnt"].astype(float) / tsh24ref_df["allcnt"].astype(float)
tsh24ref_df = tsh24ref_df[["tsh24", "impression", "ctrbytsh24"]]
X = pd.merge(X, tsh24ref_df, on=["tsh24", "impression"], how="left")
X["ctrbytsh24"] = X["ctrbytsh24"].fillna(0)
del tsh_df
del tsh24_df
del tsh24ref_df
# item ctr by some props
ctrbyprops_df = all_df[all_df.action_type == "clickout item"][["session_id", "reference", "is_y"]].copy()
ctrbyprops_df.columns = ["session_id", "item_id", "is_y"]
star_cols = ["p1 Star", "p2 Star", "p3 Star", "p4 Star", "p5 Star"]
rating_cols = ["pSatisfactory Rating", "pGood Rating", "pVery Good Rating", "pExcellent Rating"]
ctrbyprops_df = pd.merge(ctrbyprops_df, item_props[["item_id"] + star_cols + rating_cols], on="item_id",
how="left")
ctrbyprops_df["star"] = -1
ctrbyprops_df.loc[ctrbyprops_df["p1 Star"] == 1, "star"] = 1
ctrbyprops_df.loc[ctrbyprops_df["p2 Star"] == 1, "star"] = 2
ctrbyprops_df.loc[ctrbyprops_df["p3 Star"] == 1, "star"] = 3
ctrbyprops_df.loc[ctrbyprops_df["p4 Star"] == 1, "star"] = 4
ctrbyprops_df.loc[ctrbyprops_df["p5 Star"] == 1, "star"] = 5
ctrbyprops_df["r6"] = 0
ctrbyprops_df["r7"] = 0
ctrbyprops_df["r8"] = 0
ctrbyprops_df["r9"] = 0
ctrbyprops_df.loc[ctrbyprops_df["pSatisfactory Rating"] == 1, "r6"] = 6
ctrbyprops_df.loc[ctrbyprops_df["pGood Rating"] == 1, "r7"] = 7
ctrbyprops_df.loc[ctrbyprops_df["pVery Good Rating"] == 1, "r8"] = 8
ctrbyprops_df.loc[ctrbyprops_df["pExcellent Rating"] == 1, "r9"] = 9
ctrbyprops_df["rating"] = ctrbyprops_df[["r6", "r7", "r8", "r9"]].apply(
lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
ctrbyprops_df["rating"] = ctrbyprops_df["rating"].fillna(-1)
ctrbyprops_df["star_rating"] = "sr_" + ctrbyprops_df["star"].astype(str) + "_" + ctrbyprops_df["rating"].astype(
str)
ctrbyprops_df = ctrbyprops_df[["session_id", "star_rating", "item_id", "is_y"]]
ctrbyprops_df = ctrbyprops_df[ctrbyprops_df.is_y == 0] # to prevent leakage
ctrbyprops_df = ctrbyprops_df[["item_id", "star_rating"]]
ctrbyprops_df.columns = ["impression", "star_rating"]
prop_df = ctrbyprops_df[["star_rating"]].groupby(["star_rating"]).size().reset_index()
prop_df.columns = ["star_rating", "allcnt"]
propref_df = ctrbyprops_df[["star_rating", "impression"]].groupby(
["star_rating", "impression"]).size().reset_index()
propref_df.columns = ["star_rating", "impression", "rcnt"]
propref_df = pd.merge(propref_df, prop_df, on="star_rating", how="left")
propref_df["ctrbyprops"] = propref_df["rcnt"].astype(float) / propref_df["allcnt"].astype(float)
propref_df = propref_df[["star_rating", "impression", "ctrbyprops"]]
X["star_rating"] = "sr_" + X["star"].astype(str) + "_" + X["rating"].astype(str)
X = pd.merge(X, propref_df, on=["star_rating", "impression"], how="left")
X["ctrbyprops"] = X["ctrbyprops"].fillna(0)
del ctrbyprops_df
del prop_df
del propref_df
        # count how often an item is clicked out as the very first action of a session (no preceding search)
action_types = ["clickout item"]
is_nosi_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
is_nosi_df = is_nosi_df.groupby("session_id").first().reset_index()
is_nosi_df = is_nosi_df[(is_nosi_df.action_type.isin(action_types)) & (is_nosi_df.is_y == 0)]
is_nosi_df = is_nosi_df[["reference"]].groupby("reference").size().reset_index()
is_nosi_df.columns = ["impression", "nosearch_cnt"]
X = pd.merge(X, is_nosi_df, on="impression", how="left")
X["nosearch_cnt"] = X["nosearch_cnt"].fillna(0)
del is_nosi_df
return X
class BySession(object):
@classmethod
def set(cls, X, dataset):
print("... ... BySession as Motivation")
all_df = dataset["all_df"]
# item ratio of appearance by each session
def get_precnt_ratio(x):
pre_references = str(x.pre_references).split("|")
len_pre_ref = len(pre_references)
if len_pre_ref != 0:
                return float(pre_references.count(x.impression)) / len_pre_ref
return 0
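        # pre_references: pipe-joined numeric references of the session's non-clickout
        # actions; precnt_ratio is the share of those references equal to the current impression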
preref_df = all_df[all_df.action_type != "clickout item"].groupby("session_id").apply(
lambda x: "|".join([r for r in list(x.reference) if str.isnumeric(r)])).reset_index()
preref_df.columns = ["session_id", "pre_references"]
X = pd.merge(X, preref_df, on="session_id", how="left")
X[["pre_references"]] = X[["pre_references"]].fillna("")
X["precnt_ratio"] = X[["impression", "pre_references"]].apply(lambda x: get_precnt_ratio(x), axis=1)
del preref_df
# action_type ratio of appearance by each session
atype_long_names = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
atype_short_names = ["interaction_item_rating_ratio"
, "iif_ratio"
, "iii_ratio"
, "iid_ratio"
, "sfi_ratio"
, "co_ratio"]
preref_df2 = all_df[all_df.action_type.isin(atype_long_names)][
["session_id", "reference", "action_type", "is_y"]].copy()
preref_df2 = preref_df2[preref_df2.is_y == 0] # to prevent leakage
preref_df2 = preref_df2[["session_id", "reference", "action_type"]]
preref_df3 = preref_df2[["session_id"]].groupby("session_id").size().reset_index()
preref_df3.columns = ["session_id", "cnt"]
preref_df2 = pd.get_dummies(preref_df2, columns=['action_type'])
preref_df2 = preref_df2.groupby(["session_id", "reference"]).sum().reset_index()
preref_df2.columns = ["session_id", "impression"] + atype_short_names
preref_df2 = | pd.merge(preref_df2, preref_df3, on="session_id", how="left") | pandas.merge |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
        obs = filter_table(table, taxonomy, include='cc@peanut@dd',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
        obs = filter_table(table, taxonomy, exclude='ww@peanut@ee',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, include='cc,ee')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='dd')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='peanut!')
def test_filter_table_include_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='bb', mode='exact')
def test_filter_table_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='ab')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, exclude='xx')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='dd')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa; bb')
def test_filter_table_exclude_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='peanut!',
mode='exact')
| pdt.assert_frame_equal(obs, table, check_like=True) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compare one dataset to another at a variety of p-value cutoffs.
Author: <NAME> (Fraser Lab, Stanford University)
License: MIT
Version: 1.0b2
Created: 2018-05-30
Updated: 2018-05-31
See the README at:
https://github.com/TheFraserLab/enrich_pvalues/blob/master/README.rst
This software is split into four modes to allow easy p-value cutoff enrichment
plotting for any two datasets that share names (e.g. SNPs) and have p-values.
"""
from __future__ import print_function
import os as _os
import sys as _sys
import bz2 as _bz2
import gzip as _gzip
import json as _json
import argparse as _argparse
import math
import numpy as np
import pandas as pd
from tabulate import tabulate as _tab
from tqdm import tqdm as _tqdm
__version__ = '1.0b2'
# How many ticks to have on the two y-axes
YTICK_COUNT = 15
###############################################################################
# Core Enrichment Algorithm #
###############################################################################
def enrich_study(dataset, sig_comp, nsig_comp, simp_fl=False,
low_mem=False, conf=None):
"""Compute enrichment of significant data in sig_comp and nsig_comp.
This is the core algorithm of this script.
Read in all data from dataset and then take all names that beat a
significance cutoff (set in conf) and compare to names in sig_comp and
nsig_comp, computing an enrichment score (percentage in each, sig/nsig).
Repeats this for every p-value cutoff between conf['max_pval'] (default
0.05) and conf['min_pval'] (default 1e-15). We check each p-value cutoff
at intervals of 1e-x and 5e-x for each exponent x between the max and min.
Params
------
dataset : str
Path to data table to test, must contain names and p-values. Parse
instructions in the config
sig_comp : str or set
Set of names that are in the significant set, or path to a newline
separated set (made by split_study)
nsig_comp : str or set
Same as sig_comp, but for the non-significant comparison set
simp_fl : bool, optional
Treat the study_file as a two column file with no header, where the
columns are name and pvalue, separated by a tab
low_mem : bool, optional, not implemented
Do not load the whole set of names and p-values at the same time,
parse file over again for every comparison. Slower, but saves memory
        for very large datasets.
conf : dict, optional
A config file to use for parsing.
Returns
-------
    pandas.DataFrame
        One row per p-value cutoff, with columns for the cutoff, counts,
        overlaps and the enrichment score (sig_score / nonsig_score).
"""
# Build a range of cutoffs to use
max_exp = abs(math.floor(math.log10(conf['max_pval'])))
min_exp = abs(math.floor(math.log10(conf['min_pval'])))
cutoffs = []
for exp in range(max_exp, min_exp+1):
# Cumbersome way to handle floating point errors
cutoffs.append(float('{0:1e}'.format(5*(10**-exp))))
cutoffs.append(float('{0:1e}'.format(1*(10**-exp))))
_sys.stderr.write('Testing cutoffs:\n{0}\n'.format(cutoffs))
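    # Illustrative note: with the default config (max_pval=0.05,
    # min_pval=1e-15) the loop above yields a 5e-x / 1e-x pair per exponent,
    # i.e. cutoffs = [0.05, 0.01, 0.005, 0.001, ..., 5e-15, 1e-15]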
# Get the comparison sets
_sys.stderr.write('Getting comparison data\n')
scmp = get_set(sig_comp)
ncmp = get_set(nsig_comp)
del sig_comp
del nsig_comp
_sys.stderr.write(
'Got {0} sig names and {1} nonsig names.\n'
.format(len(scmp), len(ncmp))
)
# Get the right iterator
if simp_fl:
sit = simple_iterator(dataset)
else:
sit = study_iterator(
dataset, conf['test_sep'], conf['test_has_header'],
conf['test_name_col'], conf['test_pval_col']
)
# Keep only the two columns we care about, and only those that beat
# the max p-value we will test
data = []
add_data = data.append
max_cutoff = conf['max_pval']
_sys.stderr.write(
'Reading data file, keeping those less than P {0}\n'.format(max_cutoff)
)
for name, pval in _tqdm(sit, unit=' rows', disable=None):
if pval <= max_cutoff:
add_data([name, pval])
# Make our dataframe
_sys.stderr.write('Converting to DataFrame\n')
data = pd.DataFrame(data, columns=['name', 'pval'])
# Make unique, keep most significant
data = data.sort_values('pval').drop_duplicates(['name', 'pval'])
# Compute score
scores = {}
_sys.stderr.write('Calculating enrichments\n')
    for cutoff in _tqdm(cutoffs, unit=' cutoffs', disable=None):
        test_set = frozenset(data[data.pval <= cutoff].name)
        test_len = len(test_set)
        sigyl = len(test_set & scmp)
        nsigyl = len(test_set & ncmp)
        if test_len == 0:
            # Nothing beat this cutoff; record NaN instead of dividing by zero
            sigy = nsigy = score = np.nan
        else:
            sigy = sigyl/test_len
            nsigy = nsigyl/test_len
            score = np.nan if nsigy == 0 else sigy/nsigy
scores['{0:1e}'.format(cutoff)] = {
'cutoff': cutoff,
'sig_data': test_len,
'sig_overlap': sigyl,
'nonsig_overlap': nsigyl,
'sig_score': sigy,
'nonsig_score': nsigy,
'enrichment_score': score
}
# Free memory
_sys.stderr.write('Done. Clearing memory\n')
del data, scmp, ncmp
_sys.stderr.write('Making DataFrame\n')
scores = pd.DataFrame.from_dict(scores, orient='index')
scores = scores.sort_values('cutoff', ascending=False)
print(scores)
return scores
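# Example usage (illustrative file names; the sig/nonsig paths are the ones
# written by split_study / the split mode):
#
#     conf = parse_config_file()
#     scores = enrich_study('test_results.tsv', 'comparison.sig.txt.gz',
#                           'comparison.nonsig.txt.gz', conf=conf)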
def plot_scores(scores, outfile=None, figsize=(14,10), comp_prefix=None,
raw=False, show_one=False, lock_grid=False):
"""Plot enrichment score and sig count against cutoff.
Enrichment scores end up on left y axis, total number of significant right
y axis. x axis is the cutoffs.
Params
------
scores : DataFrame
from enrich_study
outfile : str, optional
Path to write figure to
figsize : tuple, optional
Size of figure
comp_prefix : str, optional
The prefix of the comparison data to use for title creation
raw : bool, optional
Plot raw counts instead of percentages
Returns
-------
fig, [ax1, ax2]
"""
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
if comp_prefix:
title = 'Enrichment vs {0} Data'.format(comp_prefix)
else:
title = 'Enrichment Scores at Various P-Value Cutoffs'
scores.reset_index(inplace=True)
sns.set_style('darkgrid')
sns.set_palette('deep')
fig, ax1 = plt.subplots(figsize=figsize)
# Plot counts
if raw:
cnt = YTICK_COUNT
scl = 0.01
scores.plot.line(y='sig_data', color='orange', ax=ax1, legend=False)
mx = scores.sig_data.max()
ax1.set_ylabel(
'Number Kept\n(Max {0:,}, Min {1:,})'.format(
mx, scores.sig_data.min()
), fontsize=14
)
set_yspace(ax1, start=0, end=mx, count=cnt, fixed=lock_grid, scale=scl)
else:
cnt = 10
scl = 0.01
scores['perc'] = scores.sig_data/scores.sig_data.max()
scores.plot.line(y='perc', color='orange', ax=ax1, legend=False)
ax1.set_ylabel(
'Percentage Kept vs P={0}'.format(scores.cutoff.max()),
fontsize=14
)
ax1.yaxis.set_major_formatter(
FuncFormatter(lambda y, _: '{0:.0%}'.format(y))
)
set_yspace(
ax1, start=0, end=1, count=cnt, fixed=True, scale=scl
)
# Format and label x-axis
ax1.set_xlabel('p-value cutoff', fontsize=14)
ax1.set_xticks(range(0, len(scores)+1, 1))
ax1.set_xticklabels(
scores.cutoff.apply(
lambda x: '{0}'.format(float('{0:1e}'.format(x)))
), rotation=45
)
# Plot enrichment score on opposite x-axis
ax2 = ax1.twinx()
scores.plot.line(
y='enrichment_score', color='orchid', ax=ax2, legend=False
)
ax2.set_ylabel(
'Enrichment Score\n(sig:non-sig enrichment)',
fontsize=14
)
set_yspace(
ax2, start=0, end=scores.enrichment_score.max(), count=cnt,
fixed=lock_grid, scale=scl
)
# Color enrichment below 1.0 as grey
ymin, ymax = ax2.get_ylim()
if ymax >= 1.1:
x = np.array(scores.index.to_series())
y = np.array(scores.enrichment_score)
x, y = get_x_y_at_one(x, y)
        y_mask = np.ma.masked_greater(y, 1.001)  # keep only values <= 1.0 for the grey segment
ax2.plot(x, y_mask, color='grey')
# Highlight the 1.0 line
if show_one:
xmin, xmax = ax2.get_xlim()
ax2.plot((xmin, xmax), (1.0, 1.0), c='purple', alpha=0.2, zorder=-10)
ax2.text(xmax, 0.98, '1.0', fontdict={'fontsize': 12, 'weight': 'bold'})
ax2.set_xlim(xmin, xmax)
# Only write the grid once
ax2.grid(None)
# Write the title
ax2.set_title(title, fontsize=17)
# Format and add legend
fig.tight_layout()
fig.legend(
labels=('Percentage Kept', 'Enrichment Score'),
loc='lower right'
)
if outfile:
fig.savefig(outfile)
return fig, [ax1, ax2]
def set_yspace(ax, start, end, count=YTICK_COUNT, fixed=False, scale=None):
"""Set the matplotlib spacing for the y-axis."""
from matplotlib.ticker import LinearLocator
from matplotlib.ticker import MaxNLocator
# Float everything
start = float(start)
end = float(end)
# Round up the end
nlen = len(str(int(end)))
lp = float(10**(nlen-1))
end = math.ceil(end/lp)*lp
# Round down the start
start = math.floor(start/lp)*lp
# Set axes limits
ax.set_ylim(start, end)
# Set the tick counts
if fixed:
ax.yaxis.set_major_locator(
LinearLocator(count)
)
else:
ax.yaxis.set_major_locator(
MaxNLocator(nbins=count)
)
if scale:
scl = end*scale
ystart = start-scl
yend = end+scl
yticks = ax.get_yticks()
ax.set_ylim(ystart, yend)
ax.set_yticks(yticks)
# Return the linspace version of above, should be the same, but position
# is not guaranteed
return np.linspace(start, end, count)
def get_x_y_at_one(x, y):
"""Return x, y with x=1.0 added if it doesn't exist."""
from scipy import stats as sts
pre = None
fin = None
xl = list(x)
yl = list(y)
    for c, i in enumerate(y):
        if i == 1.0:
            # Already have a point exactly at 1.0, nothing to interpolate
            return x, y
        if i > 1.0:
            if c == 0:
                # First point already exceeds 1.0, no prior point to
                # interpolate against
                return x, y
            tx = (xl[c-1], xl[c])
            ty = (yl[c-1], yl[c])
            break
reg = sts.linregress(tx, ty)
# new_y = (reg.slope*1.0)+reg.intercept
new_x = (1.0-reg.intercept)/reg.slope
xl.insert(c, new_x)
yl.insert(c, 1.0)
return np.array(xl), np.array(yl)
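# Illustrative example: for x = [0, 1, 2] and y = [0.4, 0.8, 1.6] the 1.0
# crossing is interpolated between the 2nd and 3rd points, giving
# x = [0, 1, 1.25, 2] and y = [0.4, 0.8, 1.0, 1.6], so the grey '< 1.0'
# segment plotted above ends exactly on the 1.0 line.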
def get_set(x):
"""Return frozenset from x if x is iterable or newline separated file."""
if isinstance(x, str):
with open_zipped(x) as fin:
return frozenset(fin.read().strip().split('\n'))
return frozenset(x)
###############################################################################
# Splitting Comparison Study #
###############################################################################
def split_study(study_file, prefix=None, simp_fl=False, low_mem=False,
conf=None):
"""Split a sample into a significant file and a non-sig file.
Params
------
study_file : str
Path the the file to parse, can be gzipped.
prefix : str, optional
A prefix to use for output files, default is study_file name.
simp_fl : bool, optional
Treat the study_file as a two column file with no header, where the
columns are name and pvalue, separated by a tab
low_mem : bool, optional, not implemented
Parse file line by line, instead of using pandas. Currently only
low-mem works
conf : dict, optional
A config file to use for parsing.
Writes
------
    <prefix>.sig.txt.gz, <prefix>.nonsig.txt.gz
        Two newline-separated files of names that are significant or
        non-significant.
"""
prefix = prefix if prefix else str(study_file)
sig_fl = prefix + '.sig.txt.gz'
nsig_fl = prefix + '.nonsig.txt.gz'
# Cutoffs
sig_p = float(conf['comp_sig_pval'])
non_sig_p = float(conf['comp_nonsig_pval'])
_sys.stderr.write(
'Significant set is P <= {0}, non-sig is P >= {1}\n'
.format(sig_p, non_sig_p)
)
if simp_fl:
sample = simple_iterator(study_file)
else:
sample = study_iterator(
study_file, conf['comp_sep'], conf['comp_has_header'],
conf['comp_name_col'], conf['comp_pval_col']
)
sig_set = set()
nsig_set = set()
add_to_sig = sig_set.add
add_to_nsig = nsig_set.add
_sys.stderr.write('Splitting dataset\n')
for name, p_val in _tqdm(sample, unit=' rows', disable=None):
if p_val <= sig_p:
add_to_sig(name)
elif p_val >= non_sig_p:
add_to_nsig(name)
_sys.stderr.write('Sorting results and writing\n')
with open_zipped(sig_fl, 'w') as sigf, open_zipped(nsig_fl, 'w') as nsgf:
sigf.write('\n'.join(sorted(sig_set)))
nsgf.write('\n'.join(sorted(nsig_set)))
_sys.stderr.write(
'Splitting done, written {} rows to {} and {} rows to {}\n'
.format(len(sig_set), sig_fl, len(nsig_set), nsig_fl)
)
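# Typical invocation (illustrative paths):
#
#     split_study('comparison.tsv.gz', prefix='comparison',
#                 conf=parse_config_file())
#
# writes 'comparison.sig.txt.gz' and 'comparison.nonsig.txt.gz', which are the
# inputs expected by enrich_study() / the run mode.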
###############################################################################
# File Iterators #
###############################################################################
def study_iterator(infile, sep, has_header, name_col, pval_col):
"""Iterate through infile, yield (name, p-value).
Params
------
infile : str
Path to a file to work on.
sep : str
Single character to split on
has_header : bool
Is first line a header
name_col : str or int
Name of col as str if has_header is True, else 0-base column index
pval_col : str or int
Name of col as str if has_header is True, else 0-base column index
Yields
------
name : str
Name of record
p-value : float
P-Value of record
"""
with open_zipped(infile) as fin:
if has_header:
header = fin.readline().strip().split(sep)
name_idx = header.index(name_col)
pval_idx = header.index(pval_col)
else:
try:
name_idx = int(name_col)
pval_idx = int(pval_col)
except ValueError:
_sys.stderr.write(
'Comp column names must be numbers if no header\n'
)
raise
for line in fin:
f = line.rstrip().split(sep)
yield f[name_idx], float(f[pval_idx])
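# Minimal sketch (hypothetical file): for a tab-separated table with a header
# containing 'name' and 'p_value' columns, the defaults in DEFAULT_CONFIG map
# straight onto this iterator:
#
#     for name, pval in study_iterator('results.tsv', '\t', True,
#                                      'name', 'p_value'):
#         print(name, pval)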
def simple_iterator(infile):
"""Iterate through infile, yield (col 1, col 2)."""
with open_zipped(infile) as fin:
for line in fin:
f = line.rstrip().split('\t')
assert len(f) == 2
yield f[0], float(f[1])
###############################################################################
# Config #
###############################################################################
# Contains defaults and help, used to generate a simple key: value dictionary
DEFAULT_CONFIG = {
'test_sep': {
'default': '\t', 'help': 'Separator used in the test dataset'
},
'comp_sep': {
'default': '\t', 'help': 'Separator used in the comparison dataset'
},
'test_has_header': {
'default': 1, 'help': '1 if has header, 0 if does not'
},
'test_name_col': {
'default': 'name',
'help': 'Column name (or number if no header) for names in test data'
},
'test_pval_col': {
'default': 'p_value',
'help': 'Column name (or number if no header) for pvals in test data'
},
'comp_has_header': {
'default': 1, 'help': '1 if has header, 0 if does not'
},
'comp_name_col': {
'default': 'name',
'help': 'Column name (or number if no header) for names in comparison data'
},
'comp_pval_col': {
'default': 'p_value',
'help': 'Column name (or number if no header) for pvals in comparison data'
},
'max_pval': {
'default': 0.05, 'help': 'Max pvalue to test enrichment for'
},
'min_pval': {
'default': 1e-15, 'help': 'Min pvalue to test enrichment for'
},
'comp_sig_pval': {
'default': 1e-4,
'help': 'pvalue to use as significant cutoff when splitting comparison data'
},
'comp_nonsig_pval': {
'default': 0.98,
'help': 'pvalue to use as not-significant cutoff when splitting comparison data'
}
}
def get_default_conf():
"""Return simple dict from DEAFULT_CONFIG."""
return {k: v['default'] for k, v in DEFAULT_CONFIG.items()}
def conf_help(outstream=_sys.stdout):
"""Print config help to outstream (default STDOUT)."""
conf = [
[k, repr(v['default']), v['help']] for k, v in DEFAULT_CONFIG.items()
]
help = _tab(conf, headers=['variable', 'default', 'help'])
help = 'Config file can contain the following values:\n\n{}\n'.format(
help
)
if outstream:
outstream.write(help)
return help
def parse_config_file(conf_file=None):
"""Load a dict from a json file and update it with defaults.
Params
------
conf_file : str, optional
Path to a json file. If None, just return default conf.
Returns
-------
config : dict
"""
conf = get_default_conf()
if conf_file:
with open_zipped(conf_file) as fin:
conf.update(_json.load(fin))
return conf
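# Example config file (all keys optional; anything omitted falls back to the
# defaults in DEFAULT_CONFIG):
#
#     {
#         "test_sep": "\t",
#         "test_name_col": "name",
#         "test_pval_col": "p_value",
#         "max_pval": 0.05,
#         "min_pval": 1e-15
#     }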
###############################################################################
# Helpers #
###############################################################################
def open_zipped(infile, mode='r'):
"""Return file handle of file regardless of compressed or not.
Also returns already opened files unchanged, text mode automatic for
compatibility with python2.
"""
# return already open files
if hasattr(infile, 'write'):
return infile
# make text mode automatic
if len(mode) == 1:
mode = mode + 't'
# refuse to handle non-strings that aren't files.
if not isinstance(infile, str):
raise ValueError("I cannot open a filename that isn't a string.")
# treat '-' appropriately
if infile == '-':
if 'w' in mode:
return _sys.stdout
return _sys.stdin
# if possible open zipped files
if infile.endswith('.gz'):
return _gzip.open(infile, mode)
if infile.endswith('.bz2'):
if hasattr(_bz2, 'open'):
return _bz2.open(infile, mode)
        return _bz2.BZ2File(infile, mode)
# fall back on regular open
return open(infile, mode)
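# For example, open_zipped('-') returns stdin (or stdout in write mode),
# open_zipped('data.tsv.gz') returns a text-mode gzip handle, and an already
# open file object is returned unchanged.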
###########################################################################
# Command Line Parsing #
###########################################################################
EPILOG = """
dump-config
-----------
The two datasets are described in an optional json config (controlled by the
DEFAULT_CONFIG constant in this script). To edit these settings, run
dump-config to get a json file to edit
split
-----
This mode takes a comparison dataset (described by the config file) and splits
it into two simple sets—a significant set, and a non-significant set. The
p-values that describe this division are set in the config.
The files from this step are required for the main algorithm, although they
don't have to be made by this mode; you can make them yourself.
run
---
Run the main algorithm: for each p-value cutoff (controlled by config), get the
percentage of names in the significant set from split mode (sig_overlap) and the
percentage of names in the non-significant set (nsig_overlap), then calculate an
enrichment score from the ratio of the two (sig_overlap:nsig_overlap).
Returns a pandas table with counts and enrichment scores for each cutoff.
Writes this as either a text file, a pandas file, or an excel file
plot
----
Create a plot from the table generated in run mode. The plot has the cutoffs
on the x-axis, the total number of names kept in the cutoff on the left y-axis,
and the enrichment score on the right y-axis.
"""
# Mode descriptions
MAIN_DESC = """\
Run the enrichment.
Requires a split comparison dataset, similar to the one produced by the
split_list mode. Inputs are (optionally compressed) tables. The default is to
expect a tab-delimited table with a header line where the column names are
'name' and 'p-value'. If ``--simple-file`` is passed, the expected input is a
two column file of name\\tp-value, with no header row.
To customize this, pass a json config file, the defaults can be written out by
running dump_config.
"""
SPLIT_DESC = """\
Split an existing study into a highly significant set and a non-significant set.
The expected input is an (optionally compressed) tab delimited file with a
header line and one column labelled 'name' and another labelled 'p-value'. If
``--simple-file`` is passed, the expected input is a two column file of
name\\tp-value, with no header row.
To customize this, pass a json config file, the defaults can be written out by
running dump_config.
The outputs are two newline-separated compressed files where each line is a
name to compare to (case-sensitive). The prefix can be specified with
'--prefix' (default is the same name as the input), the two suffices are
'sig.txt.gz' and 'non-sig.txt.gz', this is non-configurable.
By default, sig gets everything with a p-value smaller than 1e-4, non-sig gets
everything with a p-value greater than 0.99. These values can be configured
in the config.
"""
CONF_DESC = """\
Dump a default config to config file.
Optionally, you can pass an existing config file, and that one will be used
to update the defaults before writing the output conf file.
"""
def core_args(args):
"""Run the enrichment."""
conf = parse_config_file(args.config_file)
if args.max_p:
conf['max_pval'] = args.max_p
if args.min_p:
conf['min_pval'] = args.min_p
sig_comp = args.prefix + '.sig.txt.gz'
nsig_comp = args.prefix + '.nonsig.txt.gz'
bad = []
for fl in [sig_comp, nsig_comp]:
if not _os.path.isfile(fl):
bad.append(fl)
if bad:
raise OSError(
'Need both {0} and {1} to exist, missing {2}'
.format(sig_comp, nsig_comp, bad)
)
scores = enrich_study(
args.data, sig_comp, nsig_comp, simp_fl=args.simple_file,
low_mem=False, conf=conf
)
if args.output:
_sys.stderr.write('Writing score table to {0}\n'.format(args.output))
if args.output.endswith('xls') or args.output.endswith('xlsx'):
scores.to_excel(
args.output, index=False, sheet_name='Enrichment Scores'
)
elif args.output.endswith('pd') or args.output.endswith('pickle'):
scores.to_pickle(args.output)
else:
with open_zipped(args.output, 'w') as fout:
scores.to_csv(fout, sep=conf['test_sep'], index=False)
if args.plot:
_sys.stderr.write('Plotting scores to {0}\n'.format(args.plot))
plot_scores(scores, outfile=args.plot, comp_prefix=args.prefix)
def plot_args(args):
"""Run plotting only."""
conf = parse_config_file(args.config_file)
_sys.stderr.write('Getting scores\n')
if args.scores.endswith('xls') or args.scores.endswith('xlsx'):
scores = pd.read_excel(args.scores, sheet_name='Enrichment Scores')
elif args.scores.endswith('pd') or args.scores.endswith('pickle'):
scores = pd.read_pickle(args.scores)
else:
with open_zipped(args.scores) as fin:
scores = | pd.read_csv(fin, sep=conf['test_sep']) | pandas.read_csv |
from datetime import datetime
from pandas.api.types import is_datetime64_any_dtype
from pandas.api.types import is_period_dtype
from pandas.core.common import flatten
from functools import wraps
from copy import deepcopy
import logging
import numpy as np
import pandas as pd
import re
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Union,
)
logger = logging.getLogger(__name__)
# across {{{1
def across(df: pd.DataFrame,
columns: Union[str, Tuple[str], List[str]] = None,
function: Callable = None,
series_obj: bool = False,
*args, **kwargs) -> pd.DataFrame:
'''Apply function across multiple columns
Across allows you to apply a function across a number of columns in one
statement. Functions can be applied to series values (via apply()) or access
pd.Series object methods.
In pandas, to apply the same function (on a Series/columns' values) you
would normally do something like this:
.. code-block::
df['column'].apply(function)
df['column2'].apply(function)
df['column3'].apply(function)
Piper equivalent would be:
.. code-block::
across(df, ['column1', 'column2', 'column3'], function)
You can also work with Series object functions by passing keyword
series_obj=True. In Pandas, if you wanted to change the dtype of a column
you would use something like:
.. code-block::
df['col'] = df['col'].astype(float)
df['col2'] = df['col2'].astype(float)
df['col3'] = df['col3'].astype(float)
The equivalent with across would be:
.. code-block::
df = across(df, ['col', 'col2', 'col3'], function=lambda x: x.astype(float))
Parameters
----------
df
pandas dataframe
columns
column(s) to apply function.
- If a list is provided, only the columns listed are affected by the
function.
- If a tuple is supplied, the first and second values will
correspond to the from and to column(s) range used to apply the function to.
function
function to be called.
series_obj
Default is False.
True - Function applied at Series or (DataFrame) 'object' level.
False - Function applied to each Series row values.
Returns
-------
A pandas dataframe
Examples
--------
See below, example apply a function applied to each of the columns row
values.
.. code-block:: python
%%piper
sample_data()
>> across(['dates', 'order_dates'], to_julian)
# Alternative syntax, passing a lambda...
>> across(['order_dates', 'dates'], function=lambda x: to_julian(x), series_obj=False)
>> head(tablefmt='plain')
dates order_dates countries regions ids values_1 values_2
0 120001 120007 Italy East A 311 26
1 120002 120008 Portugal South D 150 375
2 120003 120009 Spain East A 396 88
3 120004 120010 Italy East B 319 233
.. code-block:: python
%%piper
sample_data()
>> across(['dates', 'order_dates'], fiscal_year, year_only=True)
>> head(tablefmt='plain')
dates order_dates countries regions ids values_1 values_2
0 FY 19/20 FY 19/20 Italy East A 311 26
1 FY 19/20 FY 19/20 Portugal South D 150 375
2 FY 19/20 FY 19/20 Spain East A 396 88
3 FY 19/20 FY 19/20 Italy East B 319 233
Accessing Series object methods - by passing series_obj=True you can also
manipulate series object and string vectorized functions (e.g. pd.Series.str.replace())
.. code-block:: python
%%piper
sample_data()
>> select(['-ids', '-regions'])
>> across(columns='values_1', function=lambda x: x.astype(int), series_obj=True)
>> across(columns=['values_1'], function=lambda x: x.astype(int), series_obj=True)
>> head(tablefmt='plain')
dates order_dates countries values_1 values_2
0 2020-01-01 00:00:00 2020-01-07 00:00:00 Italy 311 26
1 2020-01-02 00:00:00 2020-01-08 00:00:00 Portugal 150 375
2 2020-01-03 00:00:00 2020-01-09 00:00:00 Spain 396 88
3 2020-01-04 00:00:00 2020-01-10 00:00:00 Italy 319 233
'''
if isinstance(df, pd.Series):
raise TypeError('Please specify DataFrame object')
if function is None:
raise ValueError('Please specify function to apply')
if isinstance(columns, str):
if columns not in df.columns:
raise ValueError(f'column {columns} not found')
if isinstance(columns, tuple):
columns = df.loc[:, slice(*columns)].columns.tolist()
if isinstance(columns, list):
for col in columns:
if col not in df.columns:
raise ValueError(f'column {col} not found')
if isinstance(columns, str):
# If not series function (to be applied to series values)
if not series_obj:
df[columns] = df[columns].apply(function, *args, **kwargs)
else:
df[[columns]] = df[[columns]].apply(function, *args, **kwargs)
try:
# No columns -> Apply with context of ALL dataframe columns
if columns is None:
df = df.apply(function, *args, **kwargs)
return df
# Specified group of columns to update.
if isinstance(columns, list):
# Apply function to each columns 'values'
if not series_obj:
for col in columns:
df[col] = df[col].apply(function, *args, **kwargs)
# No, user wants to use/access pandas Series object attributes
# e.g. str, astype etc.
else:
df[columns] = df[columns].apply(function, *args, **kwargs)
except ValueError as e:
logger.info(e)
        msg = 'Are you trying to apply a function working with Series value(s)? Try series_obj=False'
raise ValueError(msg)
except AttributeError as e:
logger.info(e)
msg = 'Are you trying to apply a function using Series object(s)? Try series_obj=True'
raise AttributeError(msg)
return df
# adorn() {{{1
def adorn(df: pd.DataFrame,
columns: Union[str, list] = None,
fillna: Union[str, int] = '',
col_row_name: str = 'All',
axis: Union [int, str] = 0,
ignore_index: bool = False) -> pd.DataFrame:
'''add totals to a dataframe
Based on R janitor package function add row and/or column totals to a
dataframe.
Examples
--------
.. code-block::
df = sample_matrix(seed=42)
df = adorn(df, ['a', 'c'], axis='row')
head(df, 10, tablefmt='plain')
a b c d e
0 15 8.617 16 25.23 7.658
1 8 25.792 18 5.305 15.426
2 5 5.343 12 -9.133 -7.249
3 4 -0.128 13 0.92 -4.123
4 25 7.742 11 -4.247 4.556
All 57 70
.. code-block::
url = 'https://github.com/datagy/pivot_table_pandas/raw/master/sample_pivot.xlsx'
df = pd.read_excel(url, parse_dates=['Date'])
head(df)
Date Region Type Units Sales
2020-07-11 East Children's Clothing 18.0 306
2020-09-23 North Children's Clothing 14.0 448
g1 = df.groupby(['Type', 'Region']).agg(TotalSales=('Sales', 'sum')).unstack()
g1 = adorn(g1, axis='both').astype(int)
g1 = flatten_names(g1, remove_prefix='TotalSales')
g1
East North South West All
Children's Clothing 45849 37306 18570 20182 121907
Men's Clothing 51685 39975 18542 19077 129279
Women's Clothing 70229 61419 22203 22217 176068
All 167763 138700 59315 61476 427254
Parameters
----------
df
Pandas dataframe
columns
columns to be considered on the totals row. Default None - All columns
considered.
fillna
fill NaN values (default is '')
col_row_name
name of row/column title (default 'Total')
axis
axis to apply total (values: 0 or 'row', 1 or 'column')
To apply totals to both axes - use 'both'. (default is 0)
ignore_index
default False. When concatenating totals, ignore index in both
dataframes.
Returns
-------
A pandas DataFrame with additional totals row and/or column total.
'''
# ROW:
if axis == 0 or axis == 'row' or axis == 'both':
if columns is None:
numeric_columns = df.select_dtypes(include='number').columns
else:
if isinstance(columns, str):
numeric_columns = [columns]
else:
numeric_columns = columns
totals = {col: df[col].sum() for col in numeric_columns}
index_length = len(df.index.names)
if index_length == 1:
row_total = pd.DataFrame(totals, index=[col_row_name])
df = pd.concat([df, row_total], axis=0, ignore_index=ignore_index)
if ignore_index:
# Place row total name column before first numeric column
first_col = df.select_dtypes(include='number').columns[0]
first_pos = df.columns.get_loc(first_col)
if first_pos >= 1:
tot_colpos = first_pos - 1
df.iloc[-1, tot_colpos] = col_row_name
else:
total_row = ['' for _ in df.index.names]
total_row[index_length - 1] = col_row_name
index = tuple(total_row)
for col, total in totals.items():
df.loc[index, col] = total
# COLUMN: Sum numeric column(s) and concatenate result to dataframe
if axis == 1 or axis == 'column' or axis == 'both':
totals = pd.DataFrame()
if columns is None:
columns = df.select_dtypes(include='number').columns
else:
if isinstance(columns, str):
columns = [columns]
totals[col_row_name] = df[columns].sum(axis=1)
df = pd.concat([df, totals], axis=1)
if fillna is not None:
df = df.fillna(fillna)
return df
# assign() {{{1
def assign(df: pd.DataFrame,
*args,
**kwargs) -> pd.DataFrame:
'''Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
df
pandas dataframe
args
arbitrary arguments (for future use)
**kwargs
dict of {str: callable or Series}
The column names are keywords. If the values are callable, they are
computed on the DataFrame and assigned to the new columns. The callable
must not change input DataFrame (though pandas doesn't check it). If the
values are not callable, (e.g. a Series, scalar, or array), they are
simply assigned.
.. note::
If you wish to apply a function to a columns set of values:
pass a tuple with column name and function to call.
For example:
.. code-block::
assign(reversed=('regions', lambda x: x[::-1]))
is converted to:
.. code-block::
assign(reversed=lambda x: x['regions'].apply(lambda x: x[::-1]))
Returns
-------
A pandas DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\\*\\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
Examples
--------
You can create a dictionary of column names to corresponding functions,
then pass the dictionary to the assign function as shown below:
.. code-block::
%%piper --dot
sample_data()
.. assign(**{'reversed': ('regions', lambda x: x[::-1]),
'v1_x_10': lambda x: x.values_1 * 10,
'v2_div_4': lambda x: x.values_2 / 4,
'dow': lambda x: x.dates.dt.day_name(),
'ref': lambda x: x.v2_div_4 * 5,
'ids': lambda x: x.ids.astype('category')})
.. across(['values_1', 'values_2'], lambda x: x.astype(float),
series_obj=True)
.. relocate('dow', 'after', 'dates')
.. select(['-dates', '-order_dates'])
.. head(tablefmt='plain')
dow countries regions ids values_1 values_2 reversed v1_x_10 v2_div_4 ref
0 Wednesday Italy East A 311 26 tsaE 3110 6 32
1 Thursday Portugal South D 150 375 htuoS 1500 94 469
2 Friday Spain East A 396 88 tsaE 3960 22 110
3 Saturday Italy East B 319 233 tsaE 3190 58 291
.. code-block::
%%piper
sample_sales()
>> select()
>> assign(month_plus_one = lambda x: x.month + pd.Timedelta(1, 'D'),
alt_formula = lambda x: x.actual_sales * .2)
>> select(['-target_profit'])
>> assign(profit_sales = lambda x: x.actual_profit - x.actual_sales,
month_value = lambda x: x.month.dt.month,
product_added = lambda x: x['product'] + 'TEST',
salesthing = lambda x: x.target_sales.sum())
>> select(['-month', '-actual_sales', '-actual_profit', '-month_value', '-location'])
>> assign(profit_sales = lambda x: x.profit_sales.astype(int))
>> reset_index(drop=True)
>> head(tablefmt='plain')
product target_sales month_plus_one alt_formula profit_sales product_added salesthing
0 Beachwear 31749 2021-01-02 5842 -27456 BeachwearTEST 5423715
1 Beachwear 37833 2021-01-02 6810 -28601 BeachwearTEST 5423715
2 Jeans 29485 2021-01-02 6310 -27132 JeansTEST 5423715
3 Jeans 37524 2021-01-02 8180 -36811 JeansTEST 5423715
'''
if kwargs:
for keyword, value in kwargs.items():
if isinstance(value, tuple):
                column_to_apply_function, function = value
                # Bind the loop variables as default arguments, otherwise every
                # lambda would see the values from the last tuple processed
                new_function = (lambda x, col=column_to_apply_function,
                                func=function: x[col].apply(func))
                kwargs[keyword] = new_function
try:
df = df.assign(*args, **kwargs)
except ValueError as e:
logger.info(e)
raise ValueError('''Try assign(column='column_to_apply_func', function)''')
return df
# count() {{{1
def count(df: pd.DataFrame,
columns: Union[str, list] = None,
totals_name: str = 'n',
percent: bool = True,
cum_percent: bool = True,
threshold: int = 100,
round: int = 2,
totals: bool = False,
sort_values: bool = False,
reset_index: bool = False,
shape: bool = True):
'''show column/category/factor frequency
For selected column or multi-index show the frequency count, frequency %,
cum frequency %. Also provides optional totals.
Examples
--------
.. code-block::
import numpy as np
import pandas as pd
from piper.defaults import *
np.random.seed(42)
id_list = ['A', 'B', 'C', 'D', 'E']
s1 = pd.Series(np.random.choice(id_list, size=5), name='ids')
s2 = pd.Series(np.random.randint(1, 10, s1.shape[0]), name='values')
df = pd.concat([s1, s2], axis=1)
%%piper
count(df.ids, totals=True)
>> head(tablefmt='plain')
n % cum %
E 3 60 60.0
C 1 20 80.0
D 1 20 100.0
Total 5 100
%piper df >> count('ids', totals=False, cum_percent=False) >> head(tablefmt='plain')
ids n %
E 3 60
C 1 20
D 1 20
%%piper
df
>> count(['ids'], sort_values=None, totals=True)
>> head(tablefmt='plain')
n % cum %
C 1 20 20.0
D 1 20 40.0
E 3 60 100.0
Total 5 100
Parameters
----------
df
dataframe reference
columns
dataframe columns/index to be used in groupby function
totals_name
name of total column, default 'n'
percent
provide % total, default True
cum_percent
provide cum % total, default True
threshold
filter cum_percent by this value, default 100
round
round decimals, default 2
totals
add total column, default False
sort_values
default False, None means use index sort
reset_index
default False
shape
default True. Show shape information as a logger.info() message
Returns
-------
A pandas dataframe
'''
# NOTE:: pd.groupby by default does not count nans!
f = lambda x: x.value_counts(dropna=False)
try:
if isinstance(columns, str):
p1 = df.groupby(columns, dropna=False).agg(totals=(columns, f))
elif isinstance(df, pd.Series):
new_df = df.to_frame()
columns = new_df.columns.tolist()[0]
p1 = new_df.groupby(columns, dropna=False).agg(totals=(columns, f))
elif isinstance(df, pd.DataFrame):
if columns is not None:
p1 = df.groupby(columns, dropna=False).agg(totals=(columns[0], f))
else:
p1 = df.count().to_frame()
except (ValueError) as e:
p1 = df[columns].value_counts().to_frame()
p1.columns = ['totals']
except (KeyError, AttributeError) as e:
logger.info(f"Column {columns} not found!")
return
if p1.shape[0] > 0:
p1.columns = [totals_name]
if sort_values is not None:
p1.sort_values(by=totals_name, ascending=sort_values, inplace=True)
if percent:
func = lambda x : (x*100/x.sum()).round(round)
p1['%'] = func(p1[totals_name].values)
if cum_percent:
p1['cum %'] = (p1[totals_name].cumsum() * 100 / p1[totals_name].sum()).round(round)
if threshold < 100:
p1 = p1[p1['cum %'] <= threshold]
if reset_index:
p1 = p1.reset_index()
if totals:
cols = ['n']
if percent:
cols.append('%')
p1 = adorn(p1, columns=cols, col_row_name='Total',
ignore_index=reset_index)
if shape:
_shape(p1)
return p1
# clean_names() {{{1
def clean_names(df: pd.DataFrame,
case: str = 'snake',
title: bool = False) -> pd.DataFrame:
'''Clean column names, strip blanks, lowercase, snake_case.
Also removes awkward characters to allow for easier manipulation.
(optionally 'title' each column name.)
Examples
--------
.. code-block::
column_list = ['dupe**', 'Customer ', 'mdm no. to use', 'Target-name ',
' Public', '_ Material', 'Prod type', '#Effective ',
'Expired', 'Price% ', 'Currency$']
df = pd.DataFrame(None, columns=column_list)
df.columns.tolist()
['dupe**', 'Customer ', 'mdm no. to use', 'Target-name ', ' Public',
'_ Material', 'Prod type', '#Effective ', 'Expired', 'Price% ', 'Currency$']
.. code-block::
df = clean_names(df)
df.columns.tolist()
['dupe', 'customer', 'mdm_no_to_use', 'target_name', 'public', 'material',
'prod_type', 'effective', 'expired', 'price', 'currency']
.. code-block::
df = clean_names(df, case='report', title=True)
df.columns.tolist()
['Dupe', 'Customer', 'Mdm No To Use', 'Target Name', 'Public', 'Material',
'Prod Type', 'Effective', 'Expired', 'Price', 'Currency']
Parameters
----------
df
pandas dataframe
case
requested case format:
- 'snake': this_is_snake_case
- 'camel': thisIsCamelCase (title=False)
- 'camel': ThisIsCamelCase (title=True)
- 'report': this is report format (title=False)
- 'report': This Is Report Format (title=True)
title
default False. If True, titleize column values
Returns
-------
pandas DataFrame object
'''
def camel_case(string, title=False):
''' convert from blank delimitted words to camel case '''
string = re.sub(r"(_|\s)+", " ", string).title().replace(" ", "")
if title:
return string
return string[0].lower() + string[1:]
columns = [x.strip().lower() for x in df.columns]
# Remove special chars at the beginning and end of column names
special_chars = r'[\.\*\#\%\$\-\_]+'
columns = [re.sub(f'^{special_chars}', '', x) for x in columns]
columns = [re.sub(f'{special_chars}$', '', x) for x in columns]
# Any embedded special characters in the middle of words, replace with a blank
columns = [re.sub(f'{special_chars}', ' ', x) for x in columns]
    if case == 'snake':
        from_char, to_char = (' ', '_')
    elif case == 'report':
        # No conversions needed
        from_char, to_char = (' ', ' ')
    elif case == 'camel':
        # Just in case converting from snake case
        from_char, to_char = ('_', ' ')
# All special chars should now be removed, except for to_char perhaps
# and embedded spaces.
columns = [re.sub(f'{to_char}+', ' ', x) for x in columns]
# Strip any remaining blanks prepended or appended and lowercase all values
columns = [x.strip().lower() for x in columns]
# Replace any remaining embedded blanks with single replacement 'to_char' value.
    columns = [re.sub(r'\s+', to_char, x) for x in columns]
if case == 'snake':
columns = [re.sub(from_char, to_char, x) for x in columns]
if case == 'report':
columns = [re.sub(from_char, to_char, x).title() for x in columns]
if case == 'camel':
columns = [camel_case(x, title=title) for x in columns]
df.columns = columns
return df
# distinct() {{{1
def distinct(df: pd.DataFrame,
*args,
shape: bool = False,
**kwargs) -> pd.DataFrame:
'''select distinct/unique rows
This is a wrapper function rather than using e.g. df.drop_duplicates()
For details of args, kwargs - see help(pd.DataFrame.drop_duplicates)
Examples
--------
.. code-block::
from piper.verbs import select, distinct
from piper.factory import sample_data
df = sample_data()
df = select(df, ['countries', 'regions', 'ids'])
df = distinct(df, 'ids', shape=False)
expected = (5, 3)
actual = df.shape
Parameters
----------
df
dataframe
shape
default False. If True, show shape information as a logger.info() message
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas DataFrame
'''
result = df.drop_duplicates(*args, **kwargs)
if shape:
_shape(df)
return result
# drop() {{{1
def drop(df: pd.DataFrame,
*args,
**kwargs) -> pd.DataFrame:
'''drop column(s)
This is a wrapper function rather than using e.g. df.drop()
For details of args, kwargs - see help(pd.DataFrame.drop)
Examples
--------
.. code-block::
df <- pd.read_csv('inputs/export.dsv', sep='\t')
>> clean_names()
>> trim()
>> assign(adast = lambda x: x.adast.astype('category'),
adcrcd = lambda x: x.adcrcd.astype('category'),
aduom = lambda x: x.aduom.astype('category'))
>> drop(columns='adcrcd_1')
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas DataFrame
'''
return df.drop(*args, **kwargs)
# drop_if() {{{1
def drop_if(df: pd.DataFrame,
value: Union[str, int, float] = None,
how: str = 'all') -> pd.DataFrame:
''' drop columns containing blanks, zeros or na
Examples
--------
.. code-block::
df = dummy_dataframe()
head(df, tablefmt='plain')
zero_1 zero_2 zero_3 zero_4 zero_5 blank_1 blank_2 blank_3 blank_4 blank_5
0 0 0 0 0 0
1 0 0 0 0 0
2 0 0 0 0 0
3 0 0 0 0 0
.. code-block::
df = drop_if(df)
head(df, tablefmt='plain')
zero_1 zero_2 zero_3 zero_4 zero_5
0 0 0 0 0 0
1 0 0 0 0 0
2 0 0 0 0 0
3 0 0 0 0 0
Parameters
----------
df
pandas dataframe
value
Default is None. Drop a column if it contains a blank value in every row.
Enter a literal string or numeric value to check against.
Special value - 'isna'. If specified, depending on the 'how' parameter
drop columns containing na / null values.
how
{None, 'any', 'all'}, default 'all'.
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
Returns
-------
A pandas DataFrame
'''
if value == 'isna':
df = df.dropna(axis=1, how=how)
else:
if value is None:
value = ""
for col in df.columns:
drop_column = np.where(df[col] == value, 1, 0).sum() == df.shape[0]
if drop_column:
df = df.drop(columns=col, axis=1)
return df
# duplicated() {{{1
def duplicated(df: pd.DataFrame,
subset: Union[str, List[str]] = None,
keep: bool = False,
sort: bool = True,
column: str = 'duplicate',
ref_column: str = None,
duplicates: bool = False,
loc: str = 'first') -> pd.DataFrame:
'''locate duplicate data
.. note::
Returns a copy of the input dataframe object
Examples
--------
.. code-block::
from piper.factory import simple_series
df = simple_series().to_frame()
df = duplicated(df, keep='first', sort=True)
df
ids duplicate
2 C False
0 D False
1 E False
3 E True
Parameters
----------
df
pandas dataframe
subset
        column label or sequence of labels, optional
Only consider certain columns for identifying duplicates
Default None - consider ALL dataframe columns
keep
        {'first', 'last', False}, default False
first : Mark duplicates as True except for the first occurrence.
last : Mark duplicates as True except for the last occurrence.
False : Mark all duplicates as True.
sort
If True sort returned dataframe using subset fields as key
column
Insert a column name identifying whether duplicate
(True/False), default 'duplicate'
duplicates
        Default False. If True, return only duplicate key rows
Returns
-------
pandas dataframe
'''
df = _dataframe_copy(df, inplace=False)
if subset is None:
subset = df.columns.tolist()
df[column] = df.duplicated(subset, keep=keep)
if duplicates:
df = df[df[column]]
if sort:
df = df.sort_values(subset)
df = relocate(df, column=column, loc=loc)
return df
# explode() {{{1
def explode(df: pd.DataFrame,
*args,
**kwargs) -> pd.DataFrame:
'''Transform list-like column values to rows
This is a wrapper function rather than using e.g. df.explode()
For details of args, kwargs - see help(pd.DataFrame.explode)
Examples
--------
.. code-block::
from piper.factory import sample_data
df = sample_data()
df = group_by(df, 'countries')
df = summarise(df, ids=('ids', set))
df.head()
countries ids
Italy {'B', 'C', 'D', 'A', 'E'}
Portugal {'B', 'C', 'A', 'D', 'E'}
Spain {'B', 'C', 'D', 'A', 'E'}
Switzerland {'B', 'C', 'A', 'D', 'E'}
Sweden {'B', 'C', 'A', 'D', 'E'}
.. code-block::
explode(df, 'ids').head(8)
countries ids
Italy B
Italy C
Italy D
Italy A
Italy E
Portugal B
Portugal C
Portugal A
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas DataFrame
'''
return df.explode(*args, **kwargs)
# flatten_names() {{{1
def flatten_names(df: pd.DataFrame,
join_char: str = '_',
remove_prefix=None) -> pd.DataFrame:
'''Flatten multi-index column headings
Examples
--------
.. code-block::
from piper.defaults import *
%%piper
sample_data()
>> group_by(['countries', 'regions'])
>> summarise(totalval1=('values_1', 'sum'))
>> assign(mike=lambda x: x.totalval1 * 50,
eoin=lambda x: x.totalval1 * 100)
>> unstack()
>> flatten_names()
>> select(('totalval1_East', 'totalval1_West'))
>> head(tablefmt='plain')
countries totalval1_East totalval1_North totalval1_South totalval1_West
France 2170 2275 2118 4861
Germany 1764 2239 1753 1575
Italy 3023 1868 2520 2489
Norway 3741 2633 1670 1234
Parameters
----------
df
pd.DataFrame - dataframe
join_char
delimitter joining 'prefix' value(s) to be removed.
remove_prefix
string(s) (delimitted by pipe '|')
e.g. ='mike|eoin|totalval1'
Returns
-------
A pandas dataframe
'''
def _flatten(column_string, join_char='_', remove_prefix=None):
''' Takes a dataframe column string value.
if tuple (from a groupby/stack/pivot df) returns a 'joined' string
otherwise returns the original string.
'''
modified_string = column_string
if isinstance(modified_string, tuple):
modified_string = join_char.join([str(x) for x in modified_string]).strip(join_char)
if isinstance(modified_string, str):
if remove_prefix:
modified_string = re.sub(remove_prefix, '', modified_string).strip(join_char)
return modified_string
df.columns = [_flatten(x, join_char, remove_prefix) for x in df.columns]
return df
# fmt_dateidx() {{{1
def fmt_dateidx(df: pd.DataFrame,
freq: str = 'D') -> pd.DataFrame:
'''format dataframe datelike index
Checks if dataframe index contains a date-like grouper object.
If so, applies the 'frequency' string given.
Examples
--------
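    .. code-block::

        # minimal sketch, assuming sample_sales() provides a datetime 'month'
        # column (as used elsewhere in this module's examples)
        from piper.factory import sample_sales
        df = sample_sales().set_index('month')
        fmt_dateidx(df, freq='Q').head()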
Parameters
----------
df
dataframe
freq
Default 'd' (days)
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
for list of valid frequency strings.
Returns
-------
A pandas DataFrame
'''
fmts = {'A': '%Y', 'AS': '%Y', 'Q': '%b %Y', 'M': '%b', 'D': '%Y-%m-%d'}
# Reset index, cannot work out how to update date index column with
# index set.
index = df.index.names
df = df.reset_index()
for col in index:
if is_datetime64_any_dtype(df[col].dtype):
df[col] = df[col].dt.strftime(fmts.get(freq))
df = df.set_index(index)
return df
# group_by() {{{1
def group_by(df: pd.DataFrame,
*args,
freq: str = 'D',
**kwargs) -> pd.DataFrame.groupby:
'''Group by dataframe
This is a wrapper function rather than using e.g. df.groupby()
For details of args, kwargs - see help(pd.DataFrame.groupby)
The first argument or by keyword are checked in turn and converted
to pd.Grouper objects. If any group fields are 'date' like, they
can be represented as various 'frequencies' types.
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
for list of valid frequency strings.
Examples
--------
.. code-block::
%%piper
sample_data()
>> where("ids == 'A'")
>> where("values_1 > 300 & countries.isin(['Italy', 'Spain'])")
>> group_by(['countries', 'regions'])
>> summarise(total_values_1=pd.NamedAgg('values_1', 'sum'))
Parameters
----------
df
dataframe
*args
arguments for wrapped function
freq
Default 'd' (days)
**kwargs
keyword-parameters for wrapped function
Returns
-------
pandas DataFrame object
'''
args_copy = list(args)
logger.debug(args_copy)
logger.debug(kwargs)
    if kwargs.get('by') is not None:
index = _set_grouper(df, kwargs.get('by'), freq=freq)
kwargs['by'] = index
if len(args) > 0:
args_copy[0] = _set_grouper(df, args[0], freq=freq)
df = df.groupby(*args_copy, **kwargs)
return df
# head() {{{1
def head(df: pd.DataFrame,
n: int = 4,
shape: bool = True,
tablefmt: bool = False,
precision: int = 0) -> pd.DataFrame:
'''show first n records of a dataframe.
Like the corresponding R function, displays the first n records.
Alternative to using df.head()
Examples
--------
.. code-block::
head(df)
Parameters
----------
df
pandas dataframe
n
Default n=4. number of rows to display
shape
Default True. Show shape information
tablefmt
Default False. If supplied, tablefmt keyword value can be any valid
format supplied by the tabulate package
precision
Default 0. Number precision shown if tablefmt specified
Returns
-------
A pandas dataframe
'''
if shape:
_shape(df)
if tablefmt:
print(df.head(n=n)
.to_markdown(tablefmt=tablefmt,
floatfmt=f".{precision}f"))
else:
return df.head(n=n)
# info() {{{1
def info(df,
n_dupes: bool = False,
fillna: bool = False,
memory_info: bool = True) -> pd.DataFrame:
'''show dataframe meta data
Provides a summary of the structure of the dataframe data.
Examples
--------
.. code-block::
import numpy as np
import pandas as pd
from piper.verbs import info
np.random.seed(42)
id_list = ['A', 'B', 'C', 'D', 'E']
s1 = pd.Series(np.random.choice(id_list, size=5), name='ids')
s2 = pd.Series(np.random.randint(1, 10, s1.shape[0]), name='values')
df = pd.concat([s1, s2], axis=1)
columns type n isna isnull unique
0 ids object 5 0 0 3
1 values int64 5 0 0 4
Parameters
----------
df
a pandas dataframe
n_dupes
default False. Column showing the numbers of groups of duplicate records
fillna
default: False. Filter out only columns which contain nulls/np.nan
    memory_info
        Default True. If True, show dataframe memory consumption in Mb
Returns
-------
A pandas dataframe
'''
null_list = [df[col].isnull().sum() for col in df]
na_list = [df[col].isna().sum() for col in df]
dtypes_list = [df[col].dtype for col in df]
nunique_list = [df[col].nunique() for col in df]
dupes_list = [df.duplicated(col, keep=False).sum() for col in df]
total_list = [df[col].value_counts(dropna=False).sum() for col in df]
infer_list = [pd.api.types.infer_dtype(df[col]) for col in df]
dicta = {'columns': df.columns.values, 'type': dtypes_list,
'inferred': infer_list, 'n': total_list, 'isna': na_list,
'isnull': null_list, 'unique': nunique_list}
if n_dupes:
dicta.update({'n dupes': dupes_list})
dr = pd.DataFrame.from_dict(dicta)
if fillna:
return dr.query('isna > 0')
if memory_info:
memory(df)
return dr
# inner_join() {{{1
def inner_join(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
'''df (All) | df2 (All) matching records only
This is a wrapper function rather than using e.g. df.merge(how='inner')
For details of args, kwargs - see help(pd.DataFrame.merge)
Examples
--------
.. code-block::
order_data = {'OrderNo': [1001, 1002, 1003, 1004, 1005],
'Status': ['A', 'C', 'A', 'A', 'P'],
'Type_': ['SO', 'SA', 'SO', 'DA', 'DD']}
orders = pd.DataFrame(order_data)
status_data = {'Status': ['A', 'C', 'P'],
'description': ['Active', 'Closed', 'Pending']}
statuses = pd.DataFrame(status_data)
order_types_data = {'Type_': ['SA', 'SO'],
'description': ['Sales Order', 'Standing Order'],
'description_2': ['Arbitrary desc', 'another one']}
types_ = pd.DataFrame(order_types_data)
%%piper
orders >> inner_join(types_, suffixes=('_orders', '_types'))
| OrderNo | Status | Type_ | description | description_2 |
|----------:|:---------|:--------|:---------------|:----------------|
| 1001 | A | SO | Standing Order | another one |
| 1003 | A | SO | Standing Order | another one |
| 1002 | C | SA | Sales Order | Arbitrary desc |
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas DataFrame
'''
kwargs['how'] = 'inner'
logger.debug(f"{kwargs}")
return df.merge(*args, **kwargs)
# left_join() {{{1
def left_join(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
'''df (All) | df2 (All/na) df always returned
This is a wrapper function rather than using e.g. df.merge(how='left')
For details of args, kwargs - see help(pd.DataFrame.merge)
Examples
--------
.. code-block::
order_data = {'OrderNo': [1001, 1002, 1003, 1004, 1005],
'Status': ['A', 'C', 'A', 'A', 'P'],
'Type_': ['SO', 'SA', 'SO', 'DA', 'DD']}
orders = pd.DataFrame(order_data)
status_data = {'Status': ['A', 'C', 'P'],
'description': ['Active', 'Closed', 'Pending']}
statuses = pd.DataFrame(status_data)
order_types_data = {'Type_': ['SA', 'SO'],
'description': ['Sales Order', 'Standing Order'],
'description_2': ['Arbitrary desc', 'another one']}
types_ = pd.DataFrame(order_types_data)
%%piper
orders >> left_join(types_, suffixes=('_orders', '_types'))
| OrderNo | Status | Type_ | description | description_2 |
|----------:|:---------|:--------|:---------------|:----------------|
| 1001 | A | SO | Standing Order | another one |
| 1002 | C | SA | Sales Order | Arbitrary desc |
| 1003 | A | SO | Standing Order | another one |
| 1004 | A | DA | nan | nan |
| 1005 | P | DD | nan | nan |
'''
kwargs['how'] = 'left'
logger.debug(f"{kwargs}")
return df.merge(*args, **kwargs)
# memory() {{{1
def memory(df: pd.DataFrame) -> pd.DataFrame:
'''show dataframe consumed memory (mb)
Examples
--------
.. code-block::
from piper.factory import sample_sales
from piper.verbs import memory
import piper
memory(sample_sales())
>> Dataframe consumes 0.03 Mb
Parameters
----------
df
pandas dataframe
Returns
-------
A pandas dataframe
'''
memory = df.memory_usage(deep=True).sum()
memory = round(memory / (1024 * 1024), 2)
msg = f'Dataframe consumes {memory} Mb'
logger.info(msg)
return df
# names() {{{1
def names(df: pd.DataFrame,
regex: str = None,
astype: str = 'list') -> Union[str, list, dict, pd.Series, pd.DataFrame]:
'''show dataframe column information
This function is useful reviewing or manipulating column(s)
The dictionary output is particularly useful when composing the rename of
multiple columns.
Examples
--------
.. code-block::
import numpy as np
import pandas as pd
id_list = ['A', 'B', 'C', 'D', 'E']
s1 = pd.Series(np.random.choice(id_list, size=5), name='ids')
region_list = ['East', 'West', 'North', 'South']
s2 = pd.Series(np.random.choice(region_list, size=5), name='regions')
df = pd.concat([s1, s2], axis=1)
names(df, 'list')
['ids', 'regions']
Parameters
----------
df
dataframe
regex
Default None. regular expression to 'filter' list of returned columns.
astype
Default 'list'. See return options below:
- 'dict' returns a dictionary object
- 'list' returns a list object
- 'text' returns columns joined into a text string
- 'series' returns a pd.Series
- 'dataframe' returns a pd.DataFrame
Returns
-------
dictionary, list, str, pd.Series, pd.DataFrame
'''
cols = df.columns.tolist()
if regex:
cols = list(filter(re.compile(regex).match, cols))
if astype == 'dict':
return {x: x for x in cols}
elif astype == 'list':
return cols
elif astype == 'text':
return "['" +"', '".join(cols)+ "']"
elif astype == 'dataframe':
return pd.DataFrame(cols, columns = ['column_names'])
elif astype == 'series':
return pd.Series(cols, name='column_names')
else:
# note: mypy needs to see this 'dummy' catch all return statement
return None
# non_alpha() {{{1
def non_alpha(df: pd.DataFrame, col_name: str) -> pd.DataFrame:
'''check for non-alphanumeric characters
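    Examples
    --------
    .. code-block::

        # minimal sketch with a hypothetical 'code' column
        import pandas as pd
        df = pd.DataFrame({'code': ['AB12', 'cd!34', 'ok']})
        non_alpha(df, 'code')
            code  non_alpha
        0   AB12      False
        1  cd!34       True
        2     ok      False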
Parameters
----------
df
pandas dataframe
col_name
name of column to be checked
Returns
-------
pandas dataframe with additional column containing True/False
tests of the selected column having special characters
'''
pattern = r'[a-zA-Z0-9]*$'
df['non_alpha'] = ~df[col_name].str.match(pattern)
return df
# order_by() {{{1
def order_by(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
'''order/sequence dataframe
This is a wrapper function rather than using e.g. df.sort_values()
For details of args, kwargs - see help(pd.DataFrame.sort_values)
.. note::
The first argument and/or keyword 'by' is checked to see:
- For each specified column value
If it starts with a minus sign, assume ascending=False
Examples
--------
.. code-block::
%%piper
sample_data()
>> group_by(['countries', 'regions'])
>> summarise(totalval1=('values_1', 'sum'))
>> group_calc(index='countries')
>> order_by(['countries', '-group%'])
>> head(8)
| | totalval1 | group% |
|:---------------------|------------:|---------:|
| ('France', 'West') | 4861 | 42.55 |
| ('France', 'North') | 2275 | 19.91 |
| ('France', 'East') | 2170 | 19 |
| ('France', 'South') | 2118 | 18.54 |
| ('Germany', 'North') | 2239 | 30.54 |
| ('Germany', 'East') | 1764 | 24.06 |
| ('Germany', 'South') | 1753 | 23.91 |
| ('Germany', 'West') | 1575 | 21.48 |
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas DataFrame
'''
# args is a tuple, therefore needs to be cast to a list for manipulation
args_copy = list(args)
if kwargs.get('by'):
column_seq = kwargs.get('by')
else:
if isinstance(args_copy[0], str) or isinstance(args_copy[0], list):
column_seq = args_copy[0]
f = lambda x: not x.startswith('-')
if isinstance(column_seq, list):
sort_seq = [f(x) for x in column_seq]
# Set sort sequence - only if NOT specified by user
if kwargs.get('ascending') is None:
kwargs['ascending'] = sort_seq
f = lambda ix, x: column_seq[ix] if x else column_seq[ix][1:] #type: ignore
if args_copy != []:
args_copy[0] = [f(ix, x) for ix, x in enumerate(sort_seq)] #type: ignore
else:
kwargs['by'] = [f(ix, x) for ix, x in enumerate(sort_seq)] #type: ignore
else:
# Assume ascending sequence, unless otherwise specified
if kwargs.get('ascending') is None:
kwargs['ascending'] = f(column_seq) #type: ignore
if kwargs['ascending'] == False:
column_seq = column_seq[1:] #type: ignore
if args_copy != []:
args_copy[0] = column_seq
else:
kwargs['by'] = column_seq
return df.sort_values(*args_copy, **kwargs)
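# Illustrative sketch (hypothetical helper, column names taken from the
# docstring example above): the minus-sign prefix makes the two calls below
# equivalent ways of sorting 'countries' ascending and 'group%' descending.
def _order_by_sketch(df: pd.DataFrame) -> pd.DataFrame:
    via_prefix = order_by(df, ['countries', '-group%'])
    via_kwargs = df.sort_values(by=['countries', 'group%'], ascending=[True, False])
    assert via_prefix.equals(via_kwargs)
    return via_prefix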
# outer_join() {{{1
def outer_join(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
'''df (All/na) | df2 (All/na) All rows returned
This is a wrapper function rather than using e.g. df.merge(how='outer')
For details of args, kwargs - see help(pd.DataFrame.merge)
Examples
--------
.. code-block::
order_data = {'OrderNo': [1001, 1002, 1003, 1004, 1005],
'Status': ['A', 'C', 'A', 'A', 'P'],
'Type_': ['SO', 'SA', 'SO', 'DA', 'DD']}
orders = pd.DataFrame(order_data)
status_data = {'Status': ['A', 'C', 'P'],
'description': ['Active', 'Closed', 'Pending']}
statuses = pd.DataFrame(status_data)
order_types_data = {'Type_': ['SA', 'SO'],
'description': ['Sales Order', 'Standing Order'],
'description_2': ['Arbitrary desc', 'another one']}
types_ = pd.DataFrame(order_types_data)
%%piper
orders >> outer_join(types_, suffixes=('_orders', '_types'))
.. code-block::
OrderNo Status Type_ description description_2
1001 A SO Standing Order another one
1003 A SO Standing Order another one
1002 C SA Sales Order Arbitrary desc
1004 A DA nan nan
1005 P DD nan nan
'''
kwargs['how'] = 'outer'
logger.debug(f"{kwargs}")
return df.merge(*args, **kwargs)
# overlaps() {{{1
def overlaps(df: pd.DataFrame,
unique_key: Union[str, List[str]] = None,
start: str = 'effective',
end: str = 'expiry',
overlaps: str = 'overlaps') -> pd.DataFrame:
'''Analyse dataframe rows with overlapping date periods
Examples
--------
.. code-block::
data = {'prices': [100, 200, 300],
'contract': ['A', 'B', 'A'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01']}
df = pd.DataFrame(data)
overlaps(df, start='effective', end='expired', unique_key='contract')
prices contract effective expired overlaps
0 100 A 2020-01-01 2020-12-31 True
1 200 B 2020-03-03 2021-04-30 False
2 300 A 2020-05-30 2022-04-01 True
Parameters
----------
df
dataframe
unique_key
column(s) that uniquely identify rows
start
column that defines the start/effective date, default 'effective'
end
column that defines the end/expiry date, default 'expiry'
overlaps
default 'overlaps'. Name of overlapping column containing True/False values
Returns
-------
pandas dataframe with boolean based overlap column
'''
if unique_key is None:
raise ValueError('Please provide unique key.')
if isinstance(unique_key, str):
unique_key = [unique_key]
key_cols = unique_key + [start, end]
missing_cols = ', '.join([col for col in key_cols if col not in df.columns.tolist()])
if len(missing_cols) > 0:
raise KeyError(f"'{missing_cols}' not found in dataframe")
dfa = df[key_cols]
dfa.insert(0, 'index', df.index)
dfb = dfa
merged = dfa.merge(dfb, how='left', on=unique_key)
criteria_1 = merged[f'{end}_x'] >= merged[f'{start}_y']
criteria_2 = merged[f'{start}_x'] <= merged[f'{end}_y']
criteria_3 = merged.index_x != merged.index_y
# Find unique keys with overlapping dates
merged[overlaps] = criteria_1 & criteria_2 & criteria_3
cols = unique_key + [overlaps]
# We need just the unique key and whether it overlaps or not
merged = merged[cols][merged[overlaps]].drop_duplicates()
merged = df.merge(merged, how='left', on=unique_key)
merged[overlaps] = merged[overlaps].fillna(False)
return merged
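# Explanatory note (not library code): after the self-merge above, every pair of
# rows sharing the unique key is compared once; two periods [start_x, end_x] and
# [start_y, end_y] overlap when end_x >= start_y and start_x <= end_y, and the
# index inequality excludes comparing a row with itself. A minimal standalone
# version of the same criterion:
def _periods_overlap_sketch(start_x, end_x, start_y, end_y) -> bool:
    return (end_x >= start_y) and (start_x <= end_y)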
# pivot_longer() {{{1
# @wraps(pd.DataFrame.melt)
def pivot_longer(df: pd.DataFrame,
*args: Any,
**kwargs: Any) -> pd.DataFrame:
'''pivot dataframe wide to long
This is a wrapper function rather than using e.g. df.melt()
For details of args, kwargs - see help(pd.DataFrame.melt)
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas DataFrame
'''
args_copy = deepcopy(list(args))
# if first argument / id_vars is a tuple, replace with 'range' of columns
if len(args_copy) > 0:
if isinstance(args_copy[0], tuple):
(from_col, to_col) = args_copy[0]
args_copy[0] = df.loc[:, from_col:to_col].columns.tolist()
logger.info(f'|Info| tuple passed, expanding to {args_copy[0]}')
if isinstance(kwargs.get('id_vars'), tuple):
(from_col, to_col) = kwargs.get('id_vars') # type: ignore
kwargs['id_vars'] = df.loc[:, from_col:to_col].columns.tolist()
logger.info(f"|Info| tuple passed, expanding to {kwargs['id_vars']}")
# if 2nd argument / value_vars is a tuple, replace with 'range' of columns
if len(args_copy) > 1:
if isinstance(args_copy[1], tuple):
(from_col, to_col) = args_copy[1] #type: ignore
args_copy[1] = df.loc[:, from_col:to_col].columns.tolist()
logger.info(f'|Info| tuple passed, expanding to {args_copy[1]}')
if isinstance(kwargs.get('value_vars'), tuple):
(from_col, to_col) = kwargs.get('value_vars') #type: ignore
kwargs['value_vars'] = df.loc[:, from_col:to_col].columns.tolist()
logger.info(f"|Info| tuple passed, expanding to {kwargs['value_vars']}")
return df.melt(*args_copy, **kwargs)
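# Illustrative sketch (hypothetical helper and column names): a (from_col, to_col)
# tuple is expanded with df.loc[:, from_col:to_col], so for a dataframe whose
# columns run ... 'jan', 'feb', 'mar' ... the two calls below are equivalent.
def _pivot_longer_sketch(df: pd.DataFrame) -> pd.DataFrame:
    via_tuple = pivot_longer(df, value_vars=('jan', 'mar'))
    via_list = df.melt(value_vars=df.loc[:, 'jan':'mar'].columns.tolist())
    assert via_tuple.equals(via_list)
    return via_tuple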
# pivot_table() {{{1
def pivot_table(df: pd.DataFrame,
*args,
freq: str = 'M',
format_date: bool = False,
**kwargs) -> pd.DataFrame:
'''create Excel like pivot table
This is a wrapper function rather than using e.g. df.pivot_table()
For details of args, kwargs - see help(pd.DataFrame.pivot_table)
Examples
--------
.. code-block::
from piper.verbs import pivot_table
from piper.factory import sample_data
import piper.defaults
df = sample_data()
index=['dates', 'order_dates', 'regions', 'ids']
pvt = pivot_table(df, index=index, freq='Q', format_date=True)
pvt.head()
| | values_1 | values_2 |
|:--------------------------------------|-----------:|-----------:|
| ('Mar 2020', 'Mar 2020', 'East', 'A') | 227.875 | 184.25 |
| ('Mar 2020', 'Mar 2020', 'East', 'B') | 203.2 | 168 |
| ('Mar 2020', 'Mar 2020', 'East', 'C') | 126 | 367 |
| ('Mar 2020', 'Mar 2020', 'East', 'D') | 125.667 | 259 |
| ('Mar 2020', 'Mar 2020', 'East', 'E') | 194 | 219 |
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
pandas DataFrame object
'''
if kwargs.get('index') is not None:
index = _set_grouper(df, kwargs.get('index'), freq=freq)
kwargs['index'] = index
df = pd.pivot_table(df, *args, **kwargs)
if format_date:
df = fmt_dateidx(df, freq=freq)
return df
# relocate() {{{1
def relocate(df: pd.DataFrame,
column: Union[str, list] = None,
loc: str = 'last',
ref_column: str = None,
index: bool = False) -> pd.DataFrame:
'''move column(s) in a dataframe
Based on the corresponding R function - relocate
Examples
--------
.. code-block::
%%piper
sample_matrix()
>> relocate('e', 'before', 'b')
>> select('-c')
>> where("a < 20 and b > 1")
>> order_by(['-d'])
>> head(5, tablefmt='plain')
a e b d
0 15 8 9 25
1 8 15 26 5
2 5 -7 5 -9
.. code-block::
%%piper
sample_sales()
>> select(('location', 'target_profit'))
>> pd.DataFrame.set_index(['location', 'product'])
>> head(tablefmt='plain')
month target_sales target_profit
('London', 'Beachwear') 2021-01-01 00:00:00 31749 1905
('London', 'Beachwear') 2021-01-01 00:00:00 37833 6053
('London', 'Jeans') 2021-01-01 00:00:00 29485 4128
('London', 'Jeans') 2021-01-01 00:00:00 37524 3752
%%piper
sample_sales()
>> select(('location', 'target_profit'))
>> pd.DataFrame.set_index(['location', 'product'])
>> relocate(column='location', loc='after', ref_column='product', index=True)
>> head(tablefmt='plain')
month target_sales target_profit
('Beachwear', 'London') 2021-01-01 00:00:00 31749 1905
('Beachwear', 'London') 2021-01-01 00:00:00 37833 6053
('Jeans', 'London') 2021-01-01 00:00:00 29485 4128
('Jeans', 'London') 2021-01-01 00:00:00 37524 3752
**NOTE**
If you omit the keyword parameters, it probably 'reads' better ;)
>> relocate('location', 'after', 'product', index=True)
Parameters
----------
df
dataframe
column
column name(s) to be moved
loc
default -'last'. The relative location to move the column(s) to.
Valid values are: 'first', 'last', 'before', 'after'.
ref_column
Default - None. Reference column
index
default is False (column). If True, then the column(s) being moved are
considered to be row indexes.
Returns
-------
A pandas dataframe
'''
def sequence(columns, index=False):
''' Return column or index sequence '''
if index:
return df.reorder_levels(columns)
else:
return df[columns]
if column is None:
raise KeyError(f'Please enter column(s) to be relocated/moved.')
if index:
type_ = 'index(es)'
df_cols = df.index.names.copy()
else:
type_ = 'column(s)'
df_cols = df.columns.tolist().copy()
if isinstance(column, str):
column = [column]
if isinstance(column, list):
errors = [x for x in column if x not in df_cols]
if errors != []:
logger.info(f'{type_} {errors} not found!')
return df
# Remove columns to move from main list
df_cols = [x for x in df_cols if x not in column]
if loc == 'first':
df_cols = column + df_cols
return sequence(df_cols, index=index)
elif loc == 'last':
df_cols = df_cols + column
return sequence(df_cols, index=index)
if loc == 'after':
position = 0
elif loc == 'before':
position = len(column)
new_column_sequence = []
for col in df_cols:
if col == ref_column:
column.insert(position, col)
new_column_sequence.append(column)
else:
new_column_sequence.append(col)
new_column_sequence = list(flatten(new_column_sequence))
return sequence(new_column_sequence, index=index)
# replace_names {{{1
def replace_names(df: pd.DataFrame,
dict_: Dict,
info: bool = False) -> pd.DataFrame:
""" replace column names (or partially) with dictionary values
Examples
--------
.. code-block::
dict_ = { 'number$': 'nbr', 'revenue per cookie': 'unit revenue', 'cost per cookie': 'unit cost',
'month': 'mth', 'revenue per cookie': 'unit revenue', 'product': 'item', 'year': 'yr'}
cols = ['Country', 'Product', 'Units Sold', 'Revenue per cookie', 'Cost per cookie',
'Revenue', 'Cost', 'Profit', 'Date', 'Month Number', 'Month Name', 'Year']
expected = ['country','item', 'units_sold', 'unit_revenue', 'unit_cost', 'revenue', 'cost',
'profit', 'date', 'mth_nbr', 'mth_name', 'yr']
df = pd.DataFrame(None, columns=cols)
df = replace_names(df, dict_, info=False)
df = clean_names(df)
assert expected == list(df.columns)
Parameters
----------
df
a pandas dataframe
dict_
dictionary of from/to replacement values; keys may be regular expressions
info
Default False. If True, print replacement from/to values.
Returns
-------
a pandas dataframe
"""
updated_columns = []
replacements = []
for x in df.columns:
for k, v in dict_.items():
y = re.sub(k, v, x, flags=re.I)
if y != x:
replacements.append((x, y))
x = y
updated_columns.append(x)
if len(replacements) > 0:
logger.info('|Warning| automatic column names substitutions made, for details, info=True')
if info:
repl_ = [f'{x} => {y}' for (x,y) in replacements]
logger.info(f'{repl_}')
df.columns = updated_columns
return df
# rename() {{{1
# @wraps(pd.DataFrame.rename)
def rename(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame :
'''rename dataframe col(s)
This is a wrapper function rather than using e.g. df.rename()
For details of args, kwargs - see help(pd.DataFrame.rename)
Examples
--------
.. code-block::
%%piper
sample_sales()
>> rename(columns={'product': 'item'})
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
pandas DataFrame object
'''
return df.rename(*args, **kwargs)
# rename_axis() {{{1
def rename_axis(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame :
'''rename dataframe axis
This is a wrapper function rather than using e.g. df.rename_axis()
For details of args, kwargs - see help(pd.DataFrame.rename_axis)
Examples
--------
.. code-block::
%%piper
sample_sales()
>> pivot_wider(index=['location', 'product'], values='target_sales')
>> reset_index()
>> head(tablefmt='plain')
location product target_sales
0 London Beachwear 25478
1 London Footwear 25055
2 London Jeans 25906
3 London Sportswear 26671
%%piper
sample_sales()
>> pivot_wider(index=['location', 'product'], values='target_sales')
>> rename_axis(('AAA', 'BBB'), axis='rows')
>> head(tablefmt='plain')
AAA BBB target_sales
0 London Beachwear 25478
1 London Footwear 25055
2 London Jeans 25906
3 London Sportswear 26671
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
pandas DataFrame object
'''
return df.rename_axis(*args, **kwargs)
# reset_index() {{{1
# @wraps(pd.DataFrame.reset_index)
def reset_index(df: pd.DataFrame,
*args,
**kwargs) -> pd.DataFrame:
'''reset_index dataframe
This is a wrapper function rather than using e.g. df.reset_index()
For details of args, kwargs - see help(pd.DataFrame.reset_index)
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas DataFrame
'''
return df.reset_index(*args, **kwargs)
# right_join() {{{1
def right_join(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
'''df (All/na) | df2 (All) df2 always returned
This is a wrapper function rather than using e.g. df.merge(how='right')
For details of args, kwargs - see help(pd.DataFrame.merge)
Examples
--------
.. code-block::
order_data = {'OrderNo': [1001, 1002, 1003, 1004, 1005],
'Status': ['A', 'C', 'A', 'A', 'P'],
'Type_': ['SO', 'SA', 'SO', 'DA', 'DD']}
orders = pd.DataFrame(order_data)
status_data = {'Status': ['A', 'C', 'P'],
'description': ['Active', 'Closed', 'Pending']}
statuses = pd.DataFrame(status_data)
order_types_data = {'Type_': ['SA', 'SO'],
'description': ['Sales Order', 'Standing Order'],
'description_2': ['Arbitrary desc', 'another one']}
types_ = pd.DataFrame(order_types_data)
%%piper
orders >> right_join(types_, suffixes=('_orders', '_types'))
OrderNo Status Type_ description description_2
1002 C SA Sales Order Arbitrary desc
1001 A SO Standing Order another one
1003 A SO Standing Order another one
'''
kwargs['how'] = 'right'
logger.debug(f"{kwargs}")
return df.merge(*args, **kwargs)
# rows_to_names() {{{1
def rows_to_names(df: pd.DataFrame,
start: int = 0,
end: int = 1,
delimitter: str = ' ',
fillna: bool = False,
infer_objects: bool = True) -> pd.DataFrame:
'''promote row(s) to column name(s)
Optionally, infers remaining data column data types.
Examples
--------
.. code-block::
data = {'A': ['Customer', 'id', 48015346, 49512432],
'B': ['Order', 'Number', 'DE-12345', 'FR-12346'],
'C': [np.nan, 'Qty', 10, 40],
'D': ['Item', 'Number', 'SW-10-2134', 'YH-22-2030'],
'E': [np.nan, 'Description', 'Screwdriver Set', 'Workbench']}
df = pd.DataFrame(data)
head(df, tablefmt='plain')
A B C D E
0 Customer Order nan Item nan
1 id Number Qty Number Description
2 48015346 DE-12345 10 SW-10-2134 Screwdriver Set
3 49512432 FR-12346 40 YH-22-2030 Workbench
.. code-block::
df = rows_to_names(df, fillna=True)
head(df, tablefmt='plain')
Customer Id Order Number Order Qty Item Number Item Description
2 48015346 DE-12345 10 SW-10-2134 Screwdriver Set
3 49512432 FR-12346 40 YH-22-2030 Workbench
Parameters
----------
df
dataframe
start
starting row - default 0
end
ending row to combine, - default 1
delimitter
character to be used to 'join' row values together. default is ' '
fillna
default False. If True, fill nan values in header row.
infer_objects
default True. Infer data type of resultant dataframe
Returns
-------
A pandas dataframe
'''
if fillna:
df.iloc[start] = df.iloc[start].ffill().fillna('')
data = df.iloc[start].astype(str).values
rows = range(start+1, end+1)
for row in rows:
data = data + delimitter + df.iloc[row].astype(str).values
df.columns = data
# Read remaining data and reference back to dataframe reference
df = df.iloc[end + 1:]
if infer_objects:
df = df.infer_objects()
df = clean_names(df, case='report')
return df
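# Illustrative sketch using (part of) the docstring data above: the two header
# rows are joined with a space, the remaining rows are re-typed, and clean_names
# gives the 'report' casing shown in the docstring example.
def _rows_to_names_sketch() -> pd.DataFrame:
    data = {'A': ['Customer', 'id', 48015346, 49512432],
            'B': ['Order', 'Number', 'DE-12345', 'FR-12346']}
    df = pd.DataFrame(data)
    df = rows_to_names(df, start=0, end=1, fillna=True)
    # columns are now ['Customer Id', 'Order Number'], as in the example above
    return df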
# sample() {{{1
def sample(df: pd.DataFrame,
n: int = 2,
shape: bool = True,
*args,
**kwargs):
'''show sample data
This is a wrapper function rather than using e.g. df.sample()
For details of args, kwargs - see help(pd.DataFrame.sample)
Examples
--------
.. code-block::
sample(df)
Parameters
----------
df
dataframe
shape
show shape information as a logger.info() message
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas dataframe
'''
if shape:
_shape(df)
if isinstance(df, pd.Series):
return df.sample(n=n, *args, **kwargs).to_frame()
return df.sample(n=n, *args, **kwargs)
# select() {{{1
def select(df: pd.DataFrame,
*args,
regex: str = None,
like: str = None,
include: str = None,
exclude: str = None) -> pd.DataFrame:
'''select dataframe columns
Inspired by the select() function from R tidyverse.
Select column names from a dataframe
Examples
--------
.. code-block::
df = sample_sales()
select(df) # select ALL columns
# select the single column listed
select(df, 'location')
# select columns listed
select(df, ['product', 'target_sales'])
# select ALL columns EXCEPT the column listed (identified by - minus sign prefix)
select(df, '-target_profit')
# select ALL columns EXCEPT the column specified with a minus sign within the list
select(df, ['-target_sales', '-target_profit'])
# select column range using a tuple from column up to and including the 'to' column.
select(df, ('month', 'actual_profit'))
# select all number data types
select(df, '-month', include='number')
# exclude all float data types including month column
select(df, '-month', exclude='float')
# select using a regex string
select(df, regex='sales') -> select fields containing 'sales'
select(df, regex='^sales') -> select fields starting with 'sales'
select(df, regex='sales$') -> select fields ending with 'sales'
.. note ::
See the numpy dtype hierarchy. To select:
- strings use the 'object' dtype, but note that this will return
all object dtype columns
- all numeric types, use np.number or 'number'
- datetimes, use np.datetime64, 'datetime' or 'datetime64'
- timedeltas, use np.timedelta64, 'timedelta' or 'timedelta64'
- Pandas categorical dtypes, use 'category'
- Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0) or 'datetime64[ns, tz]'
Parameters
----------
df
dataframe
args
if not specified, then ALL columns are selected
- str : single column (in quotes)
- list : list of column names (in quotes)
- tuple : specify a range of column names or column positions (1-based)
.. note::
Prefixing column name with a minus sign filters out the column
from the returned list of columns e.g. '-sales', '-month'
regex
Default None. Wrapper for regex keyword in pd.DataFrame.filter()
Keep labels from axis for which re.search(regex, label) == True.
like
Default None. Wrapper for like keyword in pd.DataFrame.filter()
Keep labels from axis for which like in label == True.
include
Default None. Wrapper for include keyword in pd.DataFrame.select_dtypes()
exclude
Default None. Wrapper for exclude keyword in pd.DataFrame.select_dtypes()
Returns
-------
pandas DataFrame object
'''
columns = list(df.columns)
selected: List = []
drop: List = []
for column_arg in args:
if isinstance(column_arg, str):
selected, drop = _check_col(column_arg, selected, drop, columns)
# Tuples used to specify a 'range' of columns (from/to)
if isinstance(column_arg, tuple):
if sum([isinstance(v, str) for v in column_arg]) == 2:
cols = list(df.loc[:, slice(*column_arg)].columns)
for col in cols:
selected, drop = _check_col(col, selected, drop, columns)
if sum([isinstance(v, int) for v in column_arg]) == 2:
first, last = column_arg
cols = list(df.iloc[:, range(first-1, last)].columns)
for col in cols:
selected, drop = _check_col(col, selected, drop, columns)
# Lists to be used for passing in a set of distinct values
if isinstance(column_arg, list):
for col in column_arg:
selected, drop = _check_col(col, selected, drop, columns)
if like is not None:
cols = df.filter(like=like)
for col in cols:
selected, drop = _check_col(col, selected, drop, columns)
if regex is not None:
cols = df.filter(regex=regex)
for col in cols:
selected, drop = _check_col(col, selected, drop, columns)
if include is not None:
cols = df.select_dtypes(include=include)
for col in cols:
selected, drop = _check_col(col, selected, drop, columns)
if exclude is not None:
cols = df.select_dtypes(exclude=exclude)
for col in cols:
selected, drop = _check_col(col, selected, drop, columns)
if selected == []:
selected = columns
if drop != []:
for col in drop:
if col in selected:
selected.remove(col)
return df[selected]
# set_names() {{{1
def set_names(df: pd.DataFrame,
columns: Union[str, Any] = None) -> pd.DataFrame:
'''set dataframe column names
Parameters
----------
df
dataframe
columns
column(s) values to be changed in referenced dataframe
Returns
-------
A pandas dataframe
'''
df.columns = columns
return df
# set_index() {{{1
# @wraps(pd.DataFrame.set_index)
def set_index(df: pd.DataFrame,
*args,
**kwargs) -> pd.DataFrame:
'''set_index dataframe
This is a wrapper function rather than using e.g. df.set_index()
For details of args, kwargs - see help(pd.DataFrame.set_index)
Parameters
----------
df
dataframe
*args
arguments for wrapped function
**kwargs
keyword-parameters for wrapped function
Returns
-------
A pandas DataFrame
'''
return df.set_index(*args, **kwargs)
# split_dataframe() {{{1
def split_dataframe(df: pd.DataFrame,
chunk_size: int = 1000) -> List:
''' Split dataframe by chunk_size rows, returning multiple dataframes
1. Define 'range' (start, stop, step/chunksize)
2. Use np.split() to examine dataframe indices using the calculated 'range'.
Parameters
----------
df
dataframe to be split
chunk_size
default=1000. The number of rows in each returned chunk.
Returns
-------
A list of pd.DataFrame 'chunks'
Examples
--------
.. code-block::
chunks = split_dataframe(customer_orders_tofix, 1000)
for df in chunks:
display(head(df, 2))
'''
nrows = df.shape[0]
range_ = range(1 * chunk_size, (nrows // chunk_size + 1) * chunk_size, chunk_size)
logger.debug(range_)
return np.split(df, range_)
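# Worked note (illustrative): for 2,500 rows and chunk_size=1000 the split points
# are range(1000, 3000, 1000) -> [1000, 2000], so np.split() yields three frames
# of 1000, 1000 and 500 rows.
def _split_dataframe_sketch() -> List:
    df = pd.DataFrame({'x': range(2500)})
    chunks = split_dataframe(df, chunk_size=1000)
    assert [len(c) for c in chunks] == [1000, 1000, 500]
    return chunks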
# str_clean_number {{{1
def str_clean_number(series: pd.Series,
decimal: str = '.',
dtype: str = 'float64'):
''' clean number (e.g. currency, price) values
Series based conversion of string values which are supposed to be
numeric (e.g. prices, currency, float values).
Returns 'cleaned' values i.e. numbers, decimal point and negative '-'
are the only character values allowed.
.. note::
If a non-decimal point symbol supplied, the function issues a
warning that no data type conversion to numeric values can be performed.
Examples
--------
.. code-block::
values = ['$ 1000.48', '-23,500.54', '1004,0 00 .22', '-£43,000',
'EUR 304s0,00.00', '354.00-', '301 ', '4q5056 GBP',
'USD 20621.54973']
expected = [1000.48, -23500.54, 1004000.22, -43000.0, 304000.0,
-354.0, 301.0, 45056.0, 20621.54973]
df = pd.DataFrame(values, columns=['values'])
df['values'] = str_clean_number(df['values'])
assert expected == df['values'].values.tolist()
Parameters
----------
series
a pandas series
decimal
default is '.'
The decimal symbol e.g. decimal point or decimal comma (in Europe)
dtype
Default 'float64'. The default data type to be used to convert
the column/series. Set to None if you don't want to auto convert data type.
Returns
-------
a pandas series
'''
# make sure all values are treated as strings first.
series = series.astype(str)
# Remove all non decimal (retain decimal symbol)
series = series.str.replace(rf'[^0-9\-\{decimal}]', '', regex=True)
# If decimal symbol repeated, remove all except rightmost value
series = series.str.replace(rf'\{decimal}(?=.*\{decimal})', '', regex=True)
if decimal != '.':
logger.info('|Warning| Non-decimal symbol supplied, cannot convert to numeric')
series = series.where(series.str.contains('-') == False,
series.str.replace('-','', regex=True)
.str.replace('^(.*)', '-\\1', regex=True))
else:
# If value(s) contain a minus sign, remove then reapply by multiplying by -1
series = series.where(series.str.contains('-') == False,
series.str.replace('[-]','', regex=True).astype(float) * -1)
if dtype is not None:
series = pd.to_numeric(series).astype(dtype)
return series
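# Illustrative sketch (default decimal point): currency symbols, spaces and
# thousands separators are stripped, minus signs (leading or trailing) are
# normalised, and the result is cast to float64.
def _str_clean_number_sketch() -> pd.Series:
    raw = pd.Series(['$ 1,000.48', '354.00-', 'EUR 43,000'])
    return str_clean_number(raw)   # -> 1000.48, -354.0, 43000.0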
# str_join() {{{1
def str_join(df: pd.DataFrame,
columns: List = None,
column: str = None,
sep: str = '',
loc: str = 'after',
drop: bool = True) -> pd.DataFrame:
''' join or combine columns with a separator
Join or combine a number of columns together into one column. Column(s) that
are not of 'string' data type are automatically converted to strings then
combined.
.. note::
If the column keyword value contains one of the combined columns, it will
automatically replace the original column if drop=True.
If drop=False, the function will rename and append an underscore to the
returned combined column name. See example for details:
Parameters
----------
df
a pandas dataframe
columns
list of column names to join/combine
column
(optional) column name to store the results of combined columns
sep
(optional) separator value. Default is ''.
loc
location to place the split column output within the dataframe
Default is 'after' meaning place the output after the column.
Valid locations are: 'before' or 'after' the column.
You can also specify 'first' or 'last' corresponding to the first and last
columns of the dataframe.
For more information about relocating columns - see the relocate() function.
drop
drop original columns used in str_join function
Returns
-------
A copy of the pandas dataframe
Examples
--------
.. code-block::
%%piper
sample_sales()
>> str_join(columns=['actual_sales', 'product', 'actual_profit'],
sep='|',
column='actual_sales',
drop=True)
>> head(tablefmt='plain')
location month target_sales target_profit actual_sales
4 London 2021-01-01 00:00:00 31749 1905 29209.08|Beachwear|1752.54
125 London 2021-01-01 00:00:00 37833 6053 34049.7|Beachwear|5447.95
21 London 2021-01-01 00:00:00 29485 4128 31548.95|Jeans|4416.85
148 London 2021-01-01 00:00:00 37524 3752 40901.16|Jeans|4090.12
Same example, this time with drop=False, note the appended underscore in the
combined column name.
.. code-block::
%%piper
sample_sales()
>> select(['-target_profit', '-month'])
>> str_join(columns=['actual_sales', 'product', 'actual_profit'],
sep='|',
column='actual_sales',
drop=False)
>> head(tablefmt='plain')
location product target_sales actual_sales actual_sales_ actual_profit
4 London Beachwear 31749 29209 29209.08|Beachwear|1752.54 1753
125 London Beachwear 37833 34050 34049.7|Beachwear|5447.95 5448
21 London Jeans 29485 31549 31548.95|Jeans|4416.85 4417
148 London Jeans 37524 40901 40901.16|Jeans|4090.12 4090
'''
df = _dataframe_copy(df, inplace=False)
if not isinstance(columns, list):
raise NameError(f"Columns '{columns}' must be a list")
if column is not None:
if not isinstance(column, str):
raise TypeError(f"Column name '{column}' must be a string")
if len(columns) < 2:
raise ValueError(f"Please enter at least 2 columns")
for col in columns:
if col not in df.columns.tolist():
raise NameError(f"Please check column name '{col}' specified")
# Make sure ALL columns to be concatenated are string type
for col in columns:
if not pd.api.types.is_string_dtype(df[col]) and \
not pd.api.types.is_object_dtype(df[col]):
df[col] = df[col].astype(str)
new_col = df[columns[0]].str.cat(df[columns[1]], sep=sep)
if column is None:
new_col.name = '0'
else:
new_col.name = column
# If one of the columns to be combined has the same name
# as the new column, append '_' to the combined column name.
duplicate_column_name = False
for idx, col in enumerate(columns):
if col in new_col.name:
new_col.name = new_col.name + '_'
duplicate_column_name = True
if len(columns) > 2:
for col in columns[2:]:
new_col = new_col.str.cat(df[col], sep=sep)
df = pd.concat([df, new_col], axis=1)
df = relocate(df, new_col.name, loc=loc, ref_column=columns[0])
if drop:
df = df.drop(columns=columns)
if duplicate_column_name:
df = df.rename(columns={new_col.name: column})
return df
# str_split() {{{1
def str_split(df: pd.DataFrame,
column: str = None,
columns: List = None,
pat: str = ',',
n: int = -1,
expand: bool = True,
loc: str = 'after',
drop: bool = False) -> pd.DataFrame:
''' split column
Function accepts a column to split, a pattern/delimitter value and optional
list of column names to store the result. By default the result is placed just
after the specified column.
.. note::
If one of the target split columns contains the same name as the column
to be split and drop=False, the function will append an underscore '_'
to the end of the corresponding new split column name.
If drop=True, the new split column name will NOT be renamed and will just
replace the original column name. See examples for details.
Parameters
----------
df
a pandas dataframe
column
column to be split
columns
list-like of column names to store the results of the split
column values
pat
regular expression pattern. Default is ','
.. note::
For space(s), safer to use r'\\s' rather than ' '.
n
default -1. Number of splits to capture. -1 means capture ALL splits
loc
location to place the split column output within the dataframe
Default is 'after' meaning place the output after the column.
Valid locations are: 'before' or 'after' the column.
You can also specify 'first' or 'last' corresponding to the first and last
columns of the dataframe.
For more information about relocating columns - see the relocate() function.
drop
drop original column to be split
Returns
-------
A copy of the pandas dataframe
Examples
--------
An example where one of the new split column names is the same as the
column being split.
.. code-block::
%%piper
sample_sales()
>> select(['-target_profit', '-actual_sales'])
>> str_split(column='month',
pat='-',
columns=['day', 'month', 'year'],
drop=False)
>> head(tablefmt='plain')
location product month day month_ year target_sales actual_profit
4 London Beachwear 2021-01-01 2021 01 01 31749 1753
125 London Beachwear 2021-01-01 2021 01 01 37833 5448
21 London Jeans 2021-01-01 2021 01 01 29485 4417
148 London Jeans 2021-01-01 2021 01 01 37524 4090
The same example, this time with the drop=True parameter specified.
.. code-block::
sample_sales()
>> select(['-target_profit', '-actual_sales'])
>> str_split(column='month',
pat='-',
columns=['day', 'month', 'year'],
drop=True)
>> head(tablefmt='plain')
location product day month year target_sales actual_profit
4 London Beachwear 2021 01 01 31749 1753
125 London Beachwear 2021 01 01 37833 5448
21 London Jeans 2021 01 01 29485 4417
148 London Jeans 2021 01 01 37524 4090
'''
df = _dataframe_copy(df, inplace=False)
if not isinstance(column, str):
raise TypeError(f"Column name '{column}' must be a string")
if column not in df.columns:
raise NameError(f"Please check column name '{column}' specified")
if columns is not None:
if not isinstance(columns, list):
raise TypeError(f"Columns '{columns}' must be list-like")
# Make sure column to be split is of string type
if not pd.api.types.is_string_dtype(df[column]) and \
not pd.api.types.is_object_dtype(df[column]):
df[column] = df[column].astype(str)
if not expand:
df[column] = df[column].str.split(pat=pat, n=n, expand=False)
else:
split_cols = df[column].str.split(pat=pat, n=n, expand=True)
duplicate_column_name = False
if columns is None:
df = pd.concat([df, split_cols], axis=1)
"""Univariate anomaly detection module."""
__version__ = '1.0.0'
from typing import Dict
from fastapi import FastAPI
from pydantic import BaseModel
from adtk.detector import PersistAD, ThresholdAD, LevelShiftAD, VolatilityShiftAD
import numpy
import pandas
from .core.tools import aggregate_anomalies
app = FastAPI(
title='Univariate anomaly detection module.',
docs_url='/documentation',
redoc_url='/redoc',
description='Univariate anomaly detection based on historic data for time series.',
version=__version__
)
class Parameters(BaseModel):
"""Parameters for ADTK PersistAD"""
c: float = 3.0
window: str = '28D'
aggregate_anomalies: str = None
class TimeSeriesData(BaseModel):
"""Data provided for point anomaly detection."""
train_data: Dict[str, float]
score_data: Dict[str, float]
parameters: Parameters
class Anomalies(BaseModel):
"""Anomalies"""
anomaly_list: Dict[str, bool]
class ParametersThresholdAD(BaseModel):
"""Parameters for ADTK ThresholdAD"""
high: float = None
low: float = None
aggregate_anomalies: str = None
class TimeSeriesDataThresholdAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersThresholdAD
class ParametersLevelShiftAD(BaseModel):
"""Parameters for ADTK LevelShiftAD"""
c: float = 20.0
window: str = '60S'
aggregate_anomalies: str = None
class TimeSeriesDataLevelShiftAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersLevelShiftAD
class ParametersVolatilityShiftAD(BaseModel):
"""Parameters for ADTK LevelShiftAD"""
c: float = 20.0
window: str = '60S'
aggregate_anomalies: str = None
class TimeSeriesDataVolatilityShiftAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersVolatilityShiftAD
@app.post('/detect-point-anomalies', response_model=Anomalies)
async def detect_point_anomalies(time_series_data: TimeSeriesData):
"""Apply point anomaly detection and return list of anomalies."""
# create pandas Series from dictionary containing the time series
train_data = pandas.Series(time_series_data.train_data)
train_data.index = pandas.to_datetime(train_data.index, unit='ms')
score_data = pandas.Series(time_series_data.score_data)
score_data.index = pandas.to_datetime(score_data.index, unit='ms')
# apply persist anomaly detection to time series
persist_ad = PersistAD(
c=time_series_data.parameters.c,
side='both',
window=time_series_data.parameters.window
)
persist_ad.fit(train_data)
anomalies = persist_ad.detect(score_data)
# aggregate anomalies
if time_series_data.parameters.aggregate_anomalies:
# if aggregate_anomalies is passed with request
anomalies = aggregate_anomalies(
anomalies=anomalies,
aggregation_interval=time_series_data.parameters.aggregate_anomalies
)
# convert anomalies Series to dictionary with timestamps
anomalies.index = (
anomalies.index.astype(numpy.int64) // 10 ** 6
).astype(str)
anomalies = anomalies == 1
anomalies_dict = anomalies.to_dict()
return Anomalies(anomaly_list=anomalies_dict)
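# Illustrative request payload for /detect-point-anomalies (values are made up;
# the dictionary keys are epoch timestamps in milliseconds, matching the
# pandas.to_datetime(..., unit='ms') parsing above).
_example_point_anomaly_request = {
    'train_data': {'1609459200000': 10.0, '1609545600000': 11.2},
    'score_data': {'1612137600000': 10.4, '1612224000000': 55.0},
    'parameters': {'c': 3.0, 'window': '28D', 'aggregate_anomalies': None},
}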
@app.post('/detect-threshold-anomalies', response_model=Anomalies)
async def detect_threshold_anomalies(time_series_data: TimeSeriesDataThresholdAD):
"""Apply simple threshold anomaly detection and return list of anomalies."""
# create pandas Series from dictionary containing the time series
score_data = pandas.Series(time_series_data.score_data)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 2 15:41:04 2021
Run MLR hedonic with run_MLR_on_all_years(features=best1)
use plot_price_rooms_new_from_new_ds for time_series new rooms MLR
for standertized betas use plot_regular_feats_comparison_from_new_ds
For RF, HP tuning :
run_CV_on_all_years(df,savepath=ml_path,model_name='RF', feats=best_rf2+['SEI'])
Multifunction for RF results:
loop_over_RF_models_years(df, path=work_david/'ML', mode='score', pgrid='normal')
use mode = 'score' to calculate the R^2 for training and test
use mode = 'time-series' to get the predictions.
use mode = 'shap' to calculate the SHAP values for the test sets (warning: this takes the longest).
use mode = 'X_test' to get the test sets.
use mode = 'FI' to get feature importances.
then there are plot functions for RF and MLR:
1) plot_RF_time_series(time-series)
2) plot_RF_FI_results(fi)
3) First, produce MLR SHAPS: svs=produce_shap_MLR_all_years(df)
then, produce_RF_abs_SHAP_all_years(path=ml_path/'RF_rooms_345',mlr_shap=svs)
4)
how to produce weighted mean distance to ECs for all Israeli settelments:
first load israeli settelment mid-points:
gdf=geo_location_settelments_israel() (from cbs_procedures)
then run calculate_distance_from_gdf_to_employment_centers:
dis = calculate_distance_from_gdf_to_employment_centers(gdf,n=18, x_coord_name='X', y_coord_name='Y')
finally save to csv:
dis.to_csv(work_david/'Israel_settlments_with_mean_weighted_distance_to_ECs.csv', na_rep='NA',sep=',', index=False)
@author: shlomi
"""
from MA_paths import work_david
from MA_paths import savefig_path
import numpy as np
ml_path = work_david / 'ML'
features = ['Floor_number', 'SEI', 'New', 'Periph_value', 'Sale_year', 'Rooms_345',
'distance_to_nearest_kindergarten', 'distance_to_nearest_school', 'Total_ends']
features1 = ['FLOORNO', 'DEALNATURE', 'NEWPROJECTTEXT',
'BUILDINGYEAR', 'SEI_value', 'Ground', 'P2015_value', 'year', 'Building_Growth_Rate']
features2 = ['FLOORNO', 'DEALNATURE', 'NEWPROJECTTEXT',
'SEI_value', 'Ground', 'year', 'Building_Growth_Rate']
features3 = ['Floor_number', 'SEI', 'New', 'Periph_value', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_4_mokdim']
best = ['SEI', 'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim', 'Netflow']
best1 = ['SEI', 'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim']
best_years = best + ['year_{}'.format(x) for x in np.arange(2001, 2020)]
best_for_bs = best + ['city_code', 'Price']
next_best = ['Floor_number', 'New', 'Sale_year', 'Rooms',
'Total_ends']
best_rf = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms','Netflow',
'Total_ends', 'mean_distance_to_28_mokdim']
best_rf1 = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms',
'Total_ends', 'mean_distance_to_28_mokdim']
best_rf2 = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim']
dummies = ['New', 'Rooms_4', 'Rooms_5']
year_dummies = ['year_{}'.format(x) for x in np.arange(2001,2020)]
room_dummies = ['Rooms_4', 'Rooms_5']
best_regular = ['SEI', 'Total_ends', 'mean_distance_to_28_mokdim', 'Netflow']
best_regular1 = ['SEI', 'Total_ends', 'mean_distance_to_28_mokdim']
general_features = ['Price', 'Rooms', 'Area_m2', 'New', 'Floor_number', 'Floors_In_Building',
'Age', 'Total_ends', 'SEI', 'mean_distance_to_28_mokdim']
apts = ['דירה', 'דירה בבית קומות']
apts_more = apts + ["קוטג' דו משפחתי", "קוטג' חד משפחתי",
"דירת גן", "בית בודד", "דירת גג", "דירת גג (פנטהאוז)"]
plot_names = {'Floor_number': 'Floor',
# 'New': 'New Apartment',
'Periph_value': 'Peripheriality',
'distance_to_nearest_kindergarten': 'Nearest kindergarten',
'distance_to_nearest_school': 'Nearest school',
'Total_ends': 'Building rate',
'mean_distance_to_28_mokdim': 'Distance to ECs',
'SEI': 'Socio-Economic Index',
'SEI_value_2015': 'Social-Economic Index',
'SEI_value_2017': 'Social-Economic Index',
'Rooms': 'Rooms', 'Rooms_3': '3 Rooms', 'Rooms_5': '5 Rooms',
'Netflow': 'Net migration',
'MISH': 'AHP',
'New': 'Used/New'
}
short_plot_names = {'Total_ends': 'BR',
'mean_distance_to_28_mokdim': 'Distance',
'SEI': 'SEI', 'New': 'Used/New'}
vars_plot_names = {'Total_ends': 'BR',
'mean_distance_to_28_mokdim': 'DI',
'SEI': 'SE', 'New': 'NE', 'Rooms': 'RM'}
vars_explained_plot_names = {'Total_ends': 'BR (Building Rate)',
'mean_distance_to_28_mokdim': 'DI (Distance to ECs)',
'SEI': 'SE (Socio-Economic Index)', 'New': 'NE (Used/New)', 'Rooms': 'RM (# of Rooms)'}
add_units_dict = {'Distance': 'Distance [km]', 'BR': r'BR [Apts$\cdot$yr$^{-1}$]',
'Netflow': r'Netflow [people$\cdot$yr$^{-1}$]'}
add_units_dict_short = {'DI': 'DI [km]', 'BR': r'BR [Apts$\cdot$yr$^{-1}$]'}
# AHP : Affordable Housing Program
def pct_change(x):
import numpy as np
return (np.exp(x)-1)*100
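# Worked note (illustrative): the models work on log(price), so a coefficient or
# SHAP value x maps back to a percentage price change via (exp(x) - 1) * 100,
# e.g. pct_change(0.10) ~= 10.5 (%) and pct_change(-0.10) ~= -9.5 (%).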
def plot_single_tree(rf_model, X_train, y_train, est_index=100, samples=25, max_depth=2):
from sklearn import tree
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# rf = RandomForestRegressor(max_depth=15,n_estimators=250)
# feats = ['Status', 'Rooms', 'BR', 'Distance', 'SEI']
X_train = X_train.rename(vars_plot_names, axis=1)
feats = ['NE', 'RM', 'BR', 'DI', 'SE']
# sns.set_theme(font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
inds = X_train.sample(n=samples).index
y_train = np.log(np.exp(y_train)/4)
rf_model.fit(X_train.loc[inds], y_train.loc[inds])
_ = tree.plot_tree(rf_model[est_index],precision=2, fontsize=18, rounded=True,
feature_names=feats, filled=True, ax=ax, max_depth=max_depth, proportion=False)
filename = 'Nadlan_tree_example.png'
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return fig
def compare_r2_RF_MLR(sc, ds, mode='diagram'):
"""compare R2 score from dataset (sc=loop_over with mode=score)
and ds=run_MLR_on_all_years"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.6)
fig, ax = plt.subplots(figsize=(17, 10))
df = ds['R-squared'].to_dataframe()
df = pd.concat([df, sc], axis=1)
df.columns = ['Hedonic', 'RF train', 'RF test']
df['year'] = df.index
df = df.melt(id_vars=['year'], var_name='Model',
value_name=r'R$^2$')
# df['year'] = pd.to_datetime(df['year'], format='%Y')
if mode == 'diagram':
ax = sns.barplot(data=df, x='year', ax=ax, hue='Model', y=r'R$^2$')
# ax.set_ylabel('Apartment area [{}]'.format(unit_label))
h, l =ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, l, ncol=3, title='Model')
ax.set_xlabel('')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
# for wide dataframe:
# df = df.pivot_table(columns=['Model'],values='R$^2$',index='year')
return df
def remove_outlier_area_per_room(df, col='Area_m2', k=1.5):
from Migration_main import remove_outlier
import pandas as pd
dfs = []
for room in df['Rooms'].dropna().unique():
df1 = remove_outlier(df[df['Rooms'] == room], col_name=col, k=k)
dfs.append(df1)
df = pd.concat(dfs, axis=0)
return df
def plot_rooms_area_distribution(df, units='m2'):
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
if units == 'ft2':
df['Area_ft2'] = df['Area_m2'] * 10.764
col = 'Area_ft2'
unit_label = 'ft$^2$'
elif units == 'm2':
col = 'Area_m2'
unit_label = 'm$^2$'
sns.violinplot(data=df, x='Rooms', y=col, ax=ax, palette='inferno')
ax.set_ylabel('Apartment area [{}]'.format(unit_label))
ax.set_xlabel('Number of rooms')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def plot_general_features_corr_heatmap(df, feats=general_features, year=None):
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.5)
fig, ax = plt.subplots(figsize=(17, 10))
if year is not None:
df = df[df['Sale_year']==year]
title = 'year = {}'.format(year)
else:
title = '2000 to 2019'
dff = df[feats]
dff = dff.rename(short_plot_names, axis=1)
g = sns.heatmap(dff.corr(),annot=True,cmap='coolwarm', ax=ax, center=0)
g.set_xticklabels(g.get_xticklabels(), rotation=45, ha='right')
fig.tight_layout()
fig.suptitle(title)
fig.subplots_adjust(top=0.945)
return fig
def plot_RF_time_series(X_ts, units='nis'):
"""plot rooms new time series from RF model"""
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from cbs_procedures import read_mean_salary
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
X_ts = X_ts[X_ts['Rooms'].isin([3, 4, 5])]
X_ts['Rooms'] = X_ts['Rooms'].astype(int)
X_ts = X_ts.rename({'New': 'Used/New'}, axis=1)
X_ts.loc[X_ts['Used/New'] == 0, 'Used/New'] = 'Used'
X_ts.loc[X_ts['Used/New'] == 1, 'Used/New'] = 'New'
if units == 'dollar':
X_ts['Price'] /= 4 * 1000
ylabel = 'Apartment Price [Thousands $]'
elif units == 'nis':
X_ts['Price'] /= 1e6
ylabel = 'Apartment Price [millions NIS]'
elif units == 'salary':
sal = read_mean_salary().rename({'year': 'Year'}, axis=1)
X_ts = pd.merge(X_ts, sal, on='Year', how='inner')
X_ts['Price'] /= X_ts['mean_salary']
ylabel = 'Mean salary'
X_ts['Year'] = pd.to_datetime(X_ts['Year'], format='%Y')
X_ts = X_ts.reset_index(drop=True)
sns.lineplot(data=X_ts, x='Year', y='Price', hue='Rooms', style='Used/New',
ax=ax, palette='tab10', markers=True, markersize=10)
ax.set_ylabel(ylabel)
ax.set_xlabel('')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def produce_shap_MLR_all_years(df, feats=best1, abs_val=True):
from sklearn.linear_model import LinearRegression
import shap
import numpy as np
years = np.arange(2000, 2020, 1)
svs = []
for year in years:
print(year)
X, y = prepare_new_X_y_with_year(df, features=feats, year=year,
y_name='Price')
lr = LinearRegression()
lr.fit(X, y)
ex = shap.LinearExplainer(lr, X)
shap_values = ex.shap_values(X)
SV = convert_shap_values_to_pandas(shap_values, X)
if abs_val:
print('producing ABS SHAP.')
SV = produce_abs_SHAP_from_df(SV, X, plot=False)
svs.append(SV)
return svs
def loop_over_RF_models_years(df, path=work_david/'ML', mode='score',
pgrid='normal', feats=best_rf2+['SEI']):
import numpy as np
import pandas as pd
import shap
import xarray as xr
years = np.arange(2000, 2020, 1)
train_scores = []
test_scores = []
x_tests = []
fis = []
# shaps = []
for year in years:
print(year)
_, gr = load_HP_params_from_optimized_model(path, pgrid=pgrid,
year=year)
rf = gr.best_estimator_
X_train, X_test, y_train, y_test = produce_X_y_RF_per_year(df,
year=year,
verbose=0, feats=feats)
rf.fit(X_train, y_train)
if mode == 'score':
train_scores.append(rf.score(X_train, y_train))
test_scores.append(rf.score(X_test, y_test))
elif mode == 'time-series':
y_pred = rf.predict(X_test)
y_pred = np.exp(y_pred)
X_test['Price'] = y_pred
X_test['Year'] = year
X_test = X_test.reset_index(drop=True)
x_tests.append(X_test)
elif mode == 'shap':
# rf.fit(X_train, y_train)
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X_test.values)
SV = convert_shap_values_to_pandas(shap_values, X_test)
filename = 'Nadlan_SHAP_RF_{}.csv'.format(year)
SV.to_csv(path/filename, index=False)
# SV = SV.to_xarray().to_array('feature')
# return SV, X_test
# shaps.append(SV)
elif mode == 'X_test':
X_test.index.name = 'sample'
filename = 'Nadlan_X_test_RF_{}.csv'.format(year)
X_test.to_csv(path/filename, index=False)
# x_tests.append(X_test.to_xarray().to_array('feature'))
elif mode == 'FI':
fi = pd.DataFrame(rf.feature_importances_).T
fi.columns = X_train.columns
fi['year'] = year
fis.append(fi)
if mode == 'score':
sc = pd.DataFrame(train_scores)
sc.columns = ['train_r2']
sc['test_r2'] = test_scores
sc.index = years
return sc
elif mode == 'time-series':
X_ts = pd.concat(x_tests, axis=0)
return X_ts
elif mode == 'FI':
FI = pd.concat(fis, axis=0)
return FI
# elif mode == 'shap':
# sv_da = xr.concat(shaps, 'year')
# sv_da['year'] = years
# sv_da.attrs['long_name'] = 'Shapley values via SHAP Python package.'
# sv_da.to_netcdf(path/'Nadlan_SHAP_RF_{}-{}.nc'.format(years[0], years[-1]))
# return sv_da
# elif mode == 'X_test':
# X_ts = xr.concat(x_tests, 'year')
# X_ts['year'] = years
# X_ts.attrs['long_name'] = 'X_tests per year to use with the SHAP'
# X_ts.to_netcdf(path/'Nadlan_X_test_RF_{}-{}.nc'.format(years[0], years[-1]))
# return X_ts
def load_all_yearly_shap_values(path=work_david/'ML'):
import numpy as np
years = np.arange(2000, 2020, 1)
svs = []
X_tests = []
for year in years:
sv, X_test = load_yearly_shap_values(path, year)
svs.append(sv)
X_tests.append(X_test)
return svs, X_tests
def load_yearly_shap_values(path=work_david/'ML', year=2000):
import pandas as pd
X_test = pd.read_csv(path/'Nadlan_X_test_RF_{}.csv'.format(year))
shap_values = pd.read_csv(path/'Nadlan_SHAP_RF_{}.csv'.format(year))
assert len(X_test)==len(shap_values)
return shap_values, X_test
def load_shap_values(path=work_david/'ML', samples=10000,
interaction_too=True, rename=True):
import pandas as pd
import xarray as xr
print('loading {} samples.'.format(samples))
X_test = pd.read_csv(path/'X_test_RF_{}.csv'.format(samples))
shap_values = pd.read_csv(path/'SHAP_values_RF_{}.csv'.format(samples))
if rename:
X_test = X_test.rename(short_plot_names, axis=1)
shap_values = shap_values.rename(short_plot_names, axis=1)
if interaction_too:
print('loading interaction values too.')
shap_interaction_values = xr.load_dataarray(path/'SHAP_interaction_values_RF_{}.nc'.format(samples))
shap_interaction_values['feature1'] = X_test.columns
shap_interaction_values['feature2'] = X_test.columns
return X_test, shap_values, shap_interaction_values
else:
return X_test, shap_values
def plot_dependence(shap_values, X_test, x_feature='RM',
y_features=['DI', 'SE', 'BR'],
alpha=0.2, cmap=None, units='pct_change',
plot_size=1.5, fontsize=16, x_jitter=0.75):
import shap
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.ticker as tck
sns.set_theme(style='ticks', font_scale=1.2)
fig, axes = plt.subplots(len(y_features), 1, sharex=True, figsize=(8, 10))
X = X_test.copy()
X = X.rename(vars_plot_names, axis=1)
shap_values = shap_values.rename(vars_plot_names, axis=1)
X = X.rename(add_units_dict_short, axis=1)
# X['Old/New'] = X['Old/New'].astype(int)
# new_dict = {0: 'Old', 1: 'New'}
# X['Old/New'] = X['Old/New'].map(new_dict)
if units == 'pct_change':
shap_values = shap_values.apply(pct_change)
for i, y in enumerate(y_features):
y_new = add_units_dict_short.get(y, y)
shap.dependence_plot(x_feature, shap_values.values, X, x_jitter=x_jitter,
dot_size=4, alpha=alpha, interaction_index=y_new,
ax=axes[i])
if 'DI' in x_feature:
axes[i].set_xlim(25, 150)
if 'RM' in x_feature:
axes[i].set_xlabel('RM [# of rooms]')
cb = fig.axes[-1]
mapp = cb.collections[1]
fig.canvas.draw()
cbar = fig.colorbar(mapp, ax=axes[i],aspect=50, pad=0.05,
label=y_new)
cbar.set_alpha(0.85)
cbar.draw_all()
cb.remove()
# cbar.ax.set_yticklabels(['Low', 'High'], fontsize=fontsize)
# cbar.set_label('Predictor value')
cbar.outline.set_visible(False)
# axes[i].set_ylabel(axes[i].get_ylabel(), fontsize=fontsize)
# axes[i].set_xlabel(axes[i].get_xlabel(), fontsize=fontsize)
# axes[i].tick_params(labelsize=fontsize)
axes[i].grid(True)
if units == 'pct_change':
la = 'Price change\nfor {} [%]'.format(x_feature)
axes[i].set_ylabel(la)
[ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize) for ax in fig.axes]
[ax.tick_params(labelsize=fontsize) for ax in fig.axes]
[ax.yaxis.set_major_locator(tck.MaxNLocator(5)) for ax in fig.axes]
fig.tight_layout()
return fig
def plot_summary_shap_values(shap_values, X_test, alpha=0.7, cmap=None,
plot_size=1.5, fontsize=16, units='pct_change'):
import shap
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.8)
X_test = X_test.rename(vars_plot_names, axis=1)
shap_values = shap_values.rename(vars_plot_names, axis=1)
if units == 'pct_change':
shap_values = shap_values.apply(pct_change)
if cmap is None:
shap.summary_plot(shap_values.values, X_test, alpha=alpha, plot_size=plot_size)
else:
if not isinstance(cmap, str):
cm = cmap.get_mpl_colormap()
else:
cm = sns.color_palette(cmap, as_cmap=True)
shap.summary_plot(shap_values.values, X_test, alpha=alpha, cmap=cm, plot_size=plot_size)
if len(shap_values.shape) > 2:
fig = plt.gcf()
[ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_title(ax.get_title(), fontsize=fontsize) for ax in fig.axes]
[ax.tick_params(labelsize=fontsize) for ax in fig.axes]
else:
fig, ax = plt.gcf(), plt.gca()
if units == 'pct_change':
ax.set_xlabel('Price change [%]', fontsize=fontsize)
else:
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
cb = fig.axes[-1]
cbar = fig.colorbar(cb.collections[1], ticks=[0, 1],
aspect=50, pad=0.05)
cb.remove()
cbar.ax.set_yticklabels(['Low', 'High'], fontsize=fontsize)
cbar.set_label('Predictor value')
cbar.ax.tick_params(size=0)
cbar.outline.set_visible(False)
# cb.set_ylabel(cb.get_ylabel(), fontsize=fontsize)
# cb.tick_params(labelsize=fontsize)
fig.tight_layout()
return fig
def select_years_interaction_term(ds, regressor='SEI'):
regs = ['{}_{}'.format(x, regressor) for x in year_dummies]
ds = ds.sel(regressor=regs)
return ds
def produce_RF_abs_SHAP_all_years(path=ml_path, plot=True, mlr_shap=None,
units=None):
import xarray as xr
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
SVs, X_tests = load_all_yearly_shap_values(path)
k2s = []
for i, year in enumerate(np.arange(2000, 2020, 1)):
shap_df = SVs[i]
# shap_df.drop('year', axis=1, inplace=True)
X_test = X_tests[i]
# X_test.drop('year', axis=1, inplace=True)
k2 = produce_abs_SHAP_from_df(shap_df, X_test, plot=False)
k2['year'] = year
if mlr_shap is not None:
k2['Model'] = 'RF'
k2_mlr = mlr_shap[i]
k2_mlr['year'] = year
k2_mlr['Model'] = 'Hedonic'
k2_mlr = k2_mlr[k2_mlr['Predictor'].isin(best_regular1)]
k2 = pd.concat([k2, k2_mlr], axis=0)
k2s.append(k2)
abs_shap = pd.concat(k2s, axis=0)
abs_shap = abs_shap.reset_index(drop=True)
if plot:
sns.set_theme(style='ticks', font_scale=1.6)
fig, ax = plt.subplots(figsize=(17, 10))
abs_shap['year'] = pd.to_datetime(abs_shap['year'], format='%Y')
abs_shap = abs_shap[abs_shap['Predictor']!='New']
abs_shap = abs_shap[abs_shap['Predictor']!='Rooms']
# order:
order = ['SE (Socio-Economic Index)', 'BR (Building Rate)', 'DI (Distance to ECs)']
abs_shap['Predictor'] = abs_shap['Predictor'].map(vars_explained_plot_names)
abs_shap['SHAP_abs'] *= np.sign(abs_shap['Corr'])
if units == 'pct_change':
abs_shap['SHAP_abs'] = abs_shap['SHAP_abs'].apply(pct_change)
# order = ['Socio-Economic Index', 'Building rate', 'Distance to ECs']
if mlr_shap is not None:
sns.lineplot(data=abs_shap, x='year', y='SHAP_abs', hue='Predictor',
ax=ax, palette='Dark2', ci='sd', markers=True, linewidth=2,
hue_order=order, style='Model', markersize=10)
else:
sns.lineplot(data=abs_shap, x='year', y='SHAP_abs', hue='Predictor',
ax=ax, palette='Dark2', ci='sd', markers=True, linewidth=2,
hue_order=order, markersize=10)
if units == 'pct_change':
ax.set_ylabel('Price change [%]')
else:
ax.set_ylabel("mean |SHAP values|")
ax.set_xlabel('')
ax.grid(True)
h, la = ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, la, ncol=2, loc='center')
sns.despine(fig)
fig.tight_layout()
return abs_shap
def produce_abs_SHAP_from_df(shap_df, X_test, plot=False):
import pandas as pd
shap_v = pd.DataFrame(shap_df)
feature_list = X_test.columns
shap_v.columns = feature_list
df_v = X_test.copy().reset_index()#.drop('time', axis=1)
# Determine the correlation in order to plot with different colors
corr_list = list()
for i in feature_list:
b = np.corrcoef(shap_v[i], df_v[i])[1][0]
corr_list.append(b)
corr_df = pd.concat(
[pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
# Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
corr_df.columns = ['Predictor', 'Corr']
corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
# Plot it
shap_abs = np.abs(shap_v)
k = pd.DataFrame(shap_abs.mean()).reset_index()
k.columns = ['Predictor', 'SHAP_abs']
k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
k2 = k2.sort_values(by='SHAP_abs', ascending=True)
if plot:
colorlist = k2['Sign']
ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
color=colorlist, figsize=(5, 6), legend=False)
ax.set_xlabel("SHAP Value (Red = Positive Impact)")
return k2
def ABS_SHAP(df_shap, df):
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.2)
#import matplotlib as plt
# Make a copy of the input data
shap_v = pd.DataFrame(df_shap)
feature_list = df.columns
shap_v.columns = feature_list
df_v = df.copy().reset_index()#.drop('time', axis=1)
# Determine the correlation in order to plot with different colors
corr_list = list()
for i in feature_list:
b = np.corrcoef(shap_v[i], df_v[i])[1][0]
corr_list.append(b)
corr_df = pd.concat(
[pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
# Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
corr_df.columns = ['Predictor', 'Corr']
corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
# Plot it
shap_abs = np.abs(shap_v)
k = pd.DataFrame(shap_abs.mean()).reset_index()
k.columns = ['Predictor', 'SHAP_abs']
k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
k2 = k2.sort_values(by='SHAP_abs', ascending=True)
colorlist = k2['Sign']
ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
color=colorlist, figsize=(5, 6), legend=False)
ax.set_xlabel("SHAP Value (Red = Positive Impact)")
return
def plot_simplified_shap_tree_explainer(rf_model):
import shap
rf_model.fit(X, y)
dfX = X.to_dataset('regressor').to_dataframe()
dfX = dfX.rename(
{'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO', 'co2': r'CO$_2$'}, axis=1)
ex_rf = shap.Explainer(rf_model)
shap_values_rf = ex_rf.shap_values(dfX)
ABS_SHAP(shap_values_rf, dfX)
return
def convert_shap_values_to_pandas(shap_values, X_test):
import pandas as pd
SV = pd.DataFrame(shap_values)
SV.columns = X_test.columns
SV.index.name = 'sample'
return SV
def plot_Tree_explainer_shap(rf_model, X_train, y_train, X_test, samples=1000):
import shap
from shap.utils import sample
print('fitting...')
rf_model.fit(X_train, y_train)
# explain all the predictions in the test set
print('explaining...')
explainer = shap.TreeExplainer(rf_model)
# rename features:
X_test = X_test.rename(plot_names, axis=1)
if samples is not None:
print('using just {} samples out of {}.'.format(samples, len(X_test)))
shap_values = explainer.shap_values(sample(X_test, samples).values)
shap.summary_plot(shap_values, sample(X_test, samples))
SV = convert_shap_values_to_pandas(shap_values, sample(X_test, samples))
else:
shap_values = explainer.shap_values(X_test.values)
shap.summary_plot(shap_values, X_test)
SV = convert_shap_values_to_pandas(shap_values, X_test)
# shap.summary_plot(shap_values_rf, dfX, plot_size=1.1)
return SV
# def get_mean_std_from_df_feats(df, feats=best, ignore=['New', 'Rooms_345', 'Sale_year'],
# log=['Total_ends']):
# import numpy as np
# f = [x for x in best if x not in ignore]
# df1 = df.copy()
# if log is not None:
# df1[log] = (df1[log]+1).apply(np.log)
# mean = df1[f].mean()
# std = df1[f].std()
# return mean, std
def produce_rooms_new_years_from_ds_var(ds, dsvar='beta_coef', new_cat='Used/New',
new='New', old='Used'):
import numpy as np
import pandas as pd
df = ds[dsvar].to_dataset('year').to_dataframe().T
dfs = []
# 3 rooms old:
dff = df['const'].apply(np.exp).to_frame('Price')
dff['Rooms'] = 3
dff[new_cat] = old
dfs.append(dff)
# 3 rooms new:
dff = (df['const']+df['New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 3
dff[new_cat] = new
dfs.append(dff)
# 4 rooms old:
dff = (df['const']+df['Rooms_4']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 4
dff[new_cat] = old
dfs.append(dff)
# 4 rooms new:
dff = (df['const']+df['New']+df['Rooms_4']+df['Rooms_4_New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 4
dff[new_cat] = new
dfs.append(dff)
# 5 rooms old:
dff = (df['const']+df['Rooms_5']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 5
dff[new_cat] = old
dfs.append(dff)
# 5 rooms new:
dff = (df['const']+df['New']+df['Rooms_5']+df['Rooms_5_New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 5
dff[new_cat] = new
dfs.append(dff)
dff = pd.concat(dfs, axis=0)
dff['year'] = dff.index
return dff
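# Hedged numeric sketch (invented coefficients) of how level prices are recovered
# above from a log-price model: the price of a new 4-room flat is
# exp(const + New + Rooms_4 + Rooms_4_New).
def _example_price_from_log_coefs():
    import numpy as np
    const, new, rooms_4, rooms_4_new = 14.0, 0.10, 0.25, 0.05
    return np.exp(const + new + rooms_4 + rooms_4_new)  # ~1.79 million NIS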
def calculate_pct_change_for_long_ds_var(ds_var_long, year=2000):
    # column names follow produce_rooms_new_years_from_ds_var (new_cat='Used/New')
    d = ds_var_long.pivot(index='year', columns=[
        'Rooms', 'Used/New'], values='Price')
    d_ref = d.loc[year]
    # percent change relative to the reference year
    d /= d_ref
    d -= 1
    d *= 100
    d['year'] = d.index
    df = d.melt(id_vars=['year'], value_name='Price')
return df
def calculate_period_pct_change_from_ds(ds, syear=2008, eyear=2019):
beta=produce_rooms_new_years_from_ds_var(ds,'beta_coef')
beta = beta.pivot(index='year', columns=['Rooms', 'Used/New'],
values='Price')
beta.columns = ['{}-{}'.format(rooms, new) for rooms, new in beta.columns]
pct = 100 * (beta.loc[eyear] - beta.loc[syear]) / beta.loc[syear]
return pct
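# Hedged numeric sketch of the arithmetic in calculate_period_pct_change_from_ds
# (prices below are invented): a price moving from 1.2M to 1.8M NIS between
# syear and eyear gives 100 * (1.8 - 1.2) / 1.2 = 50%.
def _example_period_pct_change():
    import pandas as pd
    prices = pd.Series({2008: 1.2e6, 2019: 1.8e6})
    return 100 * (prices.loc[2019] - prices.loc[2008]) / prices.loc[2008]  # 50.0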
def plot_price_rooms_new_from_new_ds(ds, add_cbs_index=False,
units='nis'):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from cbs_procedures import read_apt_price_index
from cbs_procedures import read_mean_salary
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
beta = produce_rooms_new_years_from_ds_var(ds, 'beta_coef')
# calculate pct change between 2008 and 2019:
pct = (beta.loc[2019,'Price'].values-beta.loc[2008,'Price'].values)/beta.loc[2008,'Price'].values
pct *= 100
beta1 = beta.copy()
beta1.loc[2019, 'pct_change_2019_2008'] = pct
print(beta1.loc[2019])
    # calculate pct change Used vs. New in 2008:
    new_2008 = beta[beta['Used/New'] == 'New'].loc[2008, 'Price']
    used_2008 = beta[beta['Used/New'] == 'Used'].loc[2008, 'Price']
    pct = 100 * (new_2008 - used_2008) / used_2008
    print(pct)
    # calculate pct change Used vs. New in 2019:
    new_2019 = beta[beta['Used/New'] == 'New'].loc[2019, 'Price']
    used_2019 = beta[beta['Used/New'] == 'Used'].loc[2019, 'Price']
    pct = 100 * (new_2019 - used_2019) / used_2019
    print(pct)
upper = produce_rooms_new_years_from_ds_var(ds, 'CI_95_upper')
lower = produce_rooms_new_years_from_ds_var(ds, 'CI_95_lower')
if units == 'pct_change':
beta = calculate_pct_change_for_long_ds_var(beta, 2000)
upper = calculate_pct_change_for_long_ds_var(upper, 2000)
lower = calculate_pct_change_for_long_ds_var(lower, 2000)
df = pd.concat([lower, beta, upper], axis=0)
if units == 'dollar':
# approx 4 NIS to 1 $ in whole 2000-2019
df['Price'] /= 4 * 1000 # price in thousands of $
ylabel = 'Apartment Price [Thousands $]'
elif units == 'nis':
ylabel = 'Apartment Price [millions NIS]'
df['Price'] /= 1e6
elif units == 'salary':
sal = read_mean_salary()
df = pd.merge(df, sal, on='year', how='inner')
df['Price'] /= df['mean_salary']
ylabel = 'Mean salary'
elif units == 'pct_change':
ylabel = 'Apartment price change from 2000 [%]'
df['year'] = pd.to_datetime(df['year'], format='%Y')
df = df.reset_index(drop=True)
sns.lineplot(data=df, x='year', y='Price', hue='Rooms', style='Used/New',
ax=ax, palette='tab10', ci='sd', markers=True, markersize=10)
ax.set_ylabel(ylabel)
ax.set_xlabel('')
if add_cbs_index:
cbs = read_apt_price_index(path=work_david, resample='AS',
normalize_year=2000)
cbs = cbs.loc['2000':'2019']
if units == 'pct_change':
cbs /= cbs.iloc[0]
cbs -= 1
cbs *= 100
cbs_label = 'Dwellings price index change from 2000 [%]'
cbs.columns = ['Apartment Price Index']
cbs['year'] = pd.to_datetime(cbs.index, format='%Y')
if units != 'pct_change':
twin = ax.twinx()
else:
twin = ax
sns.lineplot(data=cbs, x='year', y='Apartment Price Index', ax=twin,
color='k', linewidth=2)
twin.set_ylabel('Dwellings Price Index')
twin.set_xlabel('')
twin.set_ylim(50, 300)
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def plot_regular_feats_comparison_from_new_ds(ds,reg_name='Predictor',
feats=best_regular1, units='pct_change'):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
dfs = []
df = ds['beta_coef'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
df = ds['CI_95_upper'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
df = ds['CI_95_lower'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
dff = pd.concat(dfs, axis=0)
dff['regressor'] = dff['regressor'].map(vars_explained_plot_names)
dff = dff.rename({'regressor': reg_name}, axis=1)
dff['year'] = pd.to_datetime(dff['year'], format='%Y')
dff = dff.reset_index(drop=True)
if units == 'pct_change':
dff['value'] = dff['value'].apply(pct_change)
sns.lineplot(data=dff, x='year', y='value', hue=reg_name,
ax=ax, ci='sd', markers=True,
palette='Dark2')
if units == 'pct_change':
ylabel = 'Price change [%]'
else:
ylabel = r'Standardized $\beta$s'
ax.set_ylabel(ylabel)
ax.set_xlabel('')
h, l = ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, l, ncol=1, title='Predictor', loc='center')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return dff
def prepare_new_X_y_with_year(df, year=2000, y_name='Price', features=best1):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
# m, s = get_mean_std_from_df_feats(df)
X, y, scaler = produce_X_y(
df, y_name=y_name, year=year, feats=features, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# X[best_regular] -= m
# X[best_regular] /= s
# regular vars vs. time (years):
# X1 = return_X_with_interaction(X, ['trend'], best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
# X3 = return_X_with_interaction(X, ['trend'], room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X, X2],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
def prepare_new_X_y(df, y_name='Price'):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
X, y, scaler = produce_X_y(
df, y_name=y_name, year=None, feats=best_years, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# regular vars vs. time (years):
X1 = return_X_with_interaction(X, year_dummies, best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
# X3 = return_X_with_interaction(X, year_dummies, room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X1, X2],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
def prepare_new_X_y_with_trend(df, y_name='Price'):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
X, y, scaler = produce_X_y(
df, y_name=y_name, year='trend', feats=best, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# regular vars vs. time (years):
X1 = return_X_with_interaction(X, ['trend'], best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
X3 = return_X_with_interaction(X, ['trend'], room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X1, X2, X3],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
def get_design_with_pair_interaction(data, group_pair):
""" Get the design matrix with the pairwise interactions
Parameters
----------
data (pandas.DataFrame):
Pandas data frame with the two variables to build the design matrix of their two main effects and their interaction
group_pair (iterator):
List with the name of the two variables (name of the columns) to build the design matrix of their two main effects and their interaction
Returns
-------
x_new (pandas.DataFrame):
Pandas data frame with the design matrix of their two main effects and their interaction
"""
import pandas as pd
import itertools
x = pd.get_dummies(data[group_pair])
interactions_lst = list(
itertools.combinations(
x.columns.tolist(),
2,
),
)
x_new = x.copy()
for level_1, level_2 in interactions_lst:
if level_1.split('_')[0] == level_2.split('_')[0]:
continue
x_new = pd.concat(
[
x_new,
x[level_1] * x[level_2]
],
axis=1,
)
x_new = x_new.rename(
columns = {
0: (level_1 + '_' + level_2)
}
)
return x_new
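# Hedged demo of get_design_with_pair_interaction on a toy frame (the column
# names below are invented and only mimic the dummy naming used elsewhere in
# this file). Dummies belonging to the same variable are skipped, so only the
# cross-variable products get appended.
def _example_pair_interaction():
    import pandas as pd
    toy = pd.DataFrame({
        'Rooms_345': ['3', '4', '5', '3'],  # categorical -> expanded by get_dummies
        'New': [1, 0, 1, 0],                # numeric indicator, kept as-is
    })
    design = get_design_with_pair_interaction(toy, ['Rooms_345', 'New'])
    # design holds 'New', the 'Rooms_345_*' dummies, and interaction columns
    # such as 'New_Rooms_345_3' (element-wise product of the two indicators).
    return design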
def calculate_distance_from_gdf_to_employment_centers(gdf, path=work_david, n=4,
weights='Pop2020', inverse=None,
x_coord_name='ITM-E', y_coord_name='ITM-N'):
from cbs_procedures import read_emploment_centers_2008
import numpy as np
gdf = gdf[~gdf[x_coord_name].isnull()]
gdf = gdf[~gdf[y_coord_name].isnull()]
def mean_distance_to_n_mokdim(x, weights=None):
# x = gdf['geometry']
dists = points.distance(x).to_frame('distance')
dists['Pop2020'] = points['Pop2020'] / 1000
dists = dists.sort_values('distance')
if inverse is not None:
dists['distance'] = dists['distance']**inverse
# return dists['distance'].mean()
        if weights is None:
            # unweighted mean over the n nearest centers (distance column only,
            # so that .item() below receives a scalar)
            mean_dist = dists.iloc[0:n]['distance'].mean()
else:
mean_dist = np.average(
dists.iloc[0:n]['distance'], weights=dists.iloc[0:n][weights])
return mean_dist.item()
points = read_emploment_centers_2008(path, shape=True)
if n is not None:
gdf['mean_distance_to_{}_mokdim'.format(n)] = gdf['geometry'].apply(
mean_distance_to_n_mokdim, weights=weights)
else:
for i, row in points.iterrows():
print('calculating distance to {}.'.format(row['NameHE']))
name = 'kms_to_{}'.format(i)
gdf[name] = gdf.distance(row['geometry']) / 1000.0
return gdf
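# Hedged numeric sketch (invented numbers) of the population-weighted mean used
# in mean_distance_to_n_mokdim: distances of [5, 10, 20] km to the three nearest
# centers with weights [400, 200, 100] give (5*400 + 10*200 + 20*100) / 700,
# i.e. ~8.57 km.
def _example_weighted_distance():
    import numpy as np
    return np.average(np.array([5.0, 10.0, 20.0]),
                      weights=np.array([400.0, 200.0, 100.0]))  # ~8.571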
def create_total_inout_timeseries_from_migration_network_and_cbs():
from cbs_procedures import read_yearly_inner_migration
from Migration_main import read_all_multi_year_gpickles
from Migration_main import produce_nodes_time_series
Gs = read_all_multi_year_gpickles()
da = produce_nodes_time_series(Gs)
df_in = da.sel(parameter='total_in').reset_coords(
drop=True).to_dataset('node').to_dataframe()
df_out = da.sel(parameter='total_out').reset_coords(
drop=True).to_dataset('node').to_dataframe()
df = read_yearly_inner_migration()
inflow = df[df['year'] == 2018][[
'city_code', 'inflow']].set_index('city_code').T
inflow = inflow.append(
df[df['year'] == 2019][['city_code', 'inflow']].set_index('city_code').T)
inflow.index = [2018, 2019]
inflow.index.name = 'time'
inflow.columns.name = ''
inflow.columns = [str(x) for x in inflow.columns]
outflow = df[df['year'] == 2018][[
'city_code', 'outflow']].set_index('city_code').T
outflow = outflow.append(
df[df['year'] == 2019][['city_code', 'outflow']].set_index('city_code').T)
outflow.index = [2018, 2019]
outflow.index.name = 'time'
outflow.columns.name = ''
outflow.columns = [str(x) for x in outflow.columns]
df_in = df_in.append(inflow)
df_out = df_out.append(outflow)
return df_in, df_out
def prepare_features_and_save(path=work_david, savepath=None):
from nadlan_EDA import load_nadlan_combined_deal
from cbs_procedures import read_school_coords
from cbs_procedures import read_kindergarten_coords
from cbs_procedures import read_historic_SEI
from cbs_procedures import read_building_starts_ends
from cbs_procedures import calculate_building_rates
from Migration_main import path_glob
from cbs_procedures import calculate_minimum_distance_between_two_gdfs
import numpy as np
import pandas as pd
def add_bgr_func(grp, bgr, name='3Rooms_starts'):
# import numpy as np
year = grp['Sale_year'].unique()[0]
cc = grp['city_code'].unique()[0]
try:
if bgr.columns.dtype == 'object':
gr = bgr.loc[year, str(cc)]
elif bgr.columns.dtype == 'int':
gr = bgr.loc[year, cc]
except KeyError:
gr = np.nan
grp[name] = gr
return grp
df = load_nadlan_combined_deal(
add_bgr=None, add_geo_layers=False, return_XY=True)
    # add distances to kindergartens and schools, building rates for each room type, etc.
print('Adding Building Growth rate.')
bdf = read_building_starts_ends()
for room in ['3rooms', '4rooms', '5rooms', 'Total']:
room_begins = calculate_building_rates(
bdf, phase='Begin', rooms=room, fillna=False)
room_ends = calculate_building_rates(
bdf, phase='End', rooms=room, fillna=False)
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, room_begins, name='{}_starts'.format(room))
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, room_ends, name='{}_ends'.format(room))
# df.loc[df['{}_starts'.format(room)] == 0] = np.nan
# df.loc[df['{}_ends'.format(room)] == 0] = np.nan
print('Adding minimum distance to kindergartens.')
kinder = read_kindergarten_coords()
df = df.groupby('Sale_year').apply(
calculate_minimum_distance_between_two_gdfs, kinder, 'kindergarten')
df.index = df.index.droplevel(0)
df = df.reset_index(drop=True)
print('Adding minimum distance to schools.')
school = read_school_coords()
df = df.groupby('Sale_year').apply(
calculate_minimum_distance_between_two_gdfs, school, 'school')
df.index = df.index.droplevel(0)
df = df.reset_index(drop=True)
print('Adding historic city-level SEI.')
sei = read_historic_SEI()
sei.loc[2018] = sei.loc[2017]
sei.loc[2019] = sei.loc[2017]
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, sei, name='SEI')
# add inflow and outflow:
print('Adding Inflow and Outflow')
dfi, dfo = create_total_inout_timeseries_from_migration_network_and_cbs()
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, dfi, name='Inflow')
df = df.groupby(['Sale_year', 'city_code']).apply(
add_bgr_func, dfo, name='Outflow')
# finally drop some cols so saving will not take a lot of space:
df = df.drop(['P2015_cluster2', 'Parcel_Lot', 'Sale_Y_Q', 'Sale_quarter', 'Sale_month', 'District_HE', 'm2_per_room',
'StatArea_ID', 'Building', 'street_code', 'Street', 'ObjectID', 'TREND_FORMAT', 'TREND_IS_NEGATIVE', 'POLYGON_ID'], axis=1)
if savepath is not None:
filename = 'Nadaln_with_features.csv'
df.to_csv(savepath/filename, na_rep='None', index=False)
print('{} was saved to {}.'.format(filename, savepath))
return df
def calc_vif(X, dropna=True, asfloat=True, remove_mean=True):
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor
if dropna:
print('dropping na.')
X = X.dropna()
if asfloat:
print('considering as float.')
X = X.astype(float)
if remove_mean:
X = X - X.mean()
# Calculating VIF
vif = pd.DataFrame()
vif["variables"] = X.columns
vif["VIF"] = [variance_inflation_factor(
X.values, i) for i in range(X.shape[1])]
    return vif
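# Hedged usage sketch for calc_vif on a toy design matrix (invented data):
# x2 is nearly collinear with x1, so both should show a large VIF, while the
# independent x3 should stay close to 1. statsmodels is assumed to be
# installed, since calc_vif already imports it.
def _example_calc_vif():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(42)
    x1 = rng.normal(size=200)
    X_toy = pd.DataFrame({'x1': x1,
                          'x2': x1 + 0.1 * rng.normal(size=200),
                          'x3': rng.normal(size=200)})
    return calc_vif(X_toy)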
def interpert_beta_coefs(ds, name='beta_coef', dummies=dummies):
import numpy as np
import xarray as xr
ds1 = ds[name].to_dataset('regressor')
if len(ds1.dims) == 0:
df = ds1.expand_dims('dumm').to_dataframe()
else:
df = ds1.to_dataframe()
betas = []
    # interpret dummy variables:
    for dummy in dummies:
        print('interpreting {} variable.'.format(dummy))
ser = 100*(np.exp(df[dummy])-1)
da = ser.to_xarray()
betas.append(da)
    # interpret regular log-transformed variables:
    # a 10% increase in the regressor changes the predicted price by 100*(1.1**beta - 1) percent:
regulars = [x for x in ds['regressor'].values if x not in dummies]
if 'const' in regulars:
regulars.remove('const')
if 'dumm' in regulars:
regulars.remove('dumm')
for regular in regulars:
        print('interpreting {} variable.'.format(regular))
ser = 100*(1.1**df[regular]-1)
da = ser.to_xarray()
betas.append(da)
# now, the constant is the geometric mean of the Price:
da = np.exp(df['const']).to_xarray()
betas.append(da)
beta = xr.merge(betas)
try:
beta = beta.to_array('regressor').drop('dumm')
except ValueError:
beta = beta.to_array('regressor')
# beta = beta.sortby(ds['regressor'])
ds['{}_inter'.format(name)] = beta.transpose().squeeze()
return ds
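# Hedged numeric sketch of the interpretation rules in interpert_beta_coefs
# (coefficients are invented). With log(Price) as the target, a dummy
# coefficient b maps to a 100*(exp(b)-1) percent price difference, and a
# coefficient on a log-transformed regressor maps to 100*(1.1**b - 1) percent
# for a 10% increase in that regressor.
def _example_beta_interpretation():
    import numpy as np
    b_dummy, b_log = 0.05, 0.8
    dummy_effect = 100 * (np.exp(b_dummy) - 1)  # ~5.13% premium for the dummy
    log_effect = 100 * (1.1 ** b_log - 1)       # ~7.92% per +10% in the regressor
    return dummy_effect, log_effect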
def scale_log(df, cols=None, plus1_cols=None):
    import numpy as np
    import pandas as pd
    if cols is None:
        df_scaled = df.copy()
        for col in df.columns:
            if plus1_cols is not None and col in plus1_cols:
                # log(x+1) only for the listed columns (e.g. counts that can be zero)
                print('{} is scaled using log(x+1)!'.format(col))
                df_scaled[col] = (df[col] + 1).apply(np.log)
            else:
                df_scaled[col] = df[col].apply(np.log)
    else:
        print('scaling only {} cols.'.format(cols))
        df_sliced = df[cols]
        df_scaled = df_sliced.copy()
        for col in df_sliced.columns:
            if plus1_cols is not None and col in plus1_cols:
                print('{} is scaled using log(x+1)!'.format(col))
                df_scaled[col] = (df_sliced[col] + 1).apply(np.log)
            else:
                df_scaled[col] = df_sliced[col].apply(np.log)
        df_rest = df[[x for x in df.columns if x not in cols]]
        df_scaled = pd.concat([df_scaled, df_rest], axis=1)
        df_scaled = df_scaled[[x for x in df.columns]]
    return df_scaled
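# Hedged usage sketch for scale_log (invented numbers): count-like columns such
# as 'Total_ends' can be zero, so they go through log(x+1); 'Price' is left
# untouched because only 'Total_ends' is listed in cols.
def _example_scale_log():
    import pandas as pd
    toy = pd.DataFrame({'Price': [1.0e6, 2.0e6], 'Total_ends': [0, 9]})
    return scale_log(toy, cols=['Total_ends'], plus1_cols=['Total_ends'])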
def scale_df(df, scaler, cols=None):
import pandas as pd
print('using {} scaler.'.format(scaler.__repr__()))
if cols is None:
scaled_vals = scaler.fit_transform(df)
df_scaled = pd.DataFrame(scaled_vals)
df_scaled.columns = df.columns
else:
print('scaling only {} cols.'.format(cols))
df_sliced = df[cols]
scaled_vals = scaler.fit_transform(df_sliced)
df_scaled = pd.DataFrame(scaled_vals)
df_scaled.columns = cols
df_rest = df[[x for x in df.columns if x not in cols]]
df_scaled = | pd.concat([df_scaled, df_rest], axis=1) | pandas.concat |
"""
Copyright 2020 The Google Earth Engine Community Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import h5py
import numpy as np
import pandas as pd
import os
import sys
def rhv(h5file, file):
'''
extract rh values and some qa flags:
/BEAMXXXX/rx_assess/quality_flag == 1 L2A
/BEAMXXXX/rx_assess/rx_maxamp > (8 * /BEAMXXXX/rx_assess/sd_corrected) L2A Check with Michelle on lowering sd_corrected multiplier
/BEAMXXXX/rx_processing_a<n>/rx_algrunflag == 1 L2A <n> is equal to the value of /BEAMXXXX/selected_algorithm
/BEAMXXXX/rx_processing_a<n>/zcross > 0 L2A <n> is equal to the value of /BEAMXXXX/selected_algorithm
/BEAMXXXX/rx_processing_a<n>/toploc > 0 L2A <n> is equal to the value of /BEAMXXXX/selected_algorithm
/BEAMXXXX/sensitivity > 0 L2A
/BEAMXXXX/sensitivity <= 1 L2A
    In this implementation, assume that all the shots in rh use the same algorithm.
:param h5file: gedi l2a file
:param file: csv file
:return:
'''
f = h5py.File(h5file)
fmt = '%3.6f,%3.6f,%d,%8.4f,%3.2f'
with open(file, 'w') as oufh:
# oufh.writelines('lon,lat,rh98\n')
#oufh.writelines('lon,lat,beam,channel,acquisition_date,rh98\n')
is_first = True
for k in f.keys():
if not k.startswith('BEAM'):
continue
print('\t',k)
lat = f[f'{k}/lat_lowestmode']
lon = f[f'{k}/lon_lowestmode']
beam = f[f'{k}/beam']
channel = f[f'{k}/channel']
#dtime = np.array(f[f'{k}/delta_time']) * 1000 + 1514764800000
dtime = np.array(f[f'{k}/delta_time']) + 1514764800
degrade = f[f'{k}/degrade_flag']
quality = f[f'{k}/quality_flag']
sensitivity = f[f'{k}/sensitivity']
rx_quality = f[f'{k}/rx_assess/quality_flag']
            # assuming all shots use the same algorithm, arbitrarily pick the 1000th element
algorithm = f[f'{k}/selected_algorithm'][1000]
rx_algrunflag = f[f'{k}/rx_processing_a{algorithm}/rx_algrunflag']
zcross = f[f'{k}/rx_processing_a{algorithm}/zcross']
toploc = f[f'{k}/rx_processing_a{algorithm}/toploc']
rh = f[f'{k}/rh']
quantiles = (10,20,30,40,50,60,70,80,90,98)
rh = rh[:, quantiles]
names = [f'rh{x}' for x in quantiles]
drh = pd.DataFrame(rh, columns=names)
ds = {'lon': lon,
'lat': lat,
'beam': beam,
'channel': channel,
'dtime': dtime,
'degrade': degrade,
'quality': quality,
'sensitivity': sensitivity,
'rx_quality': rx_quality,
'rx_algrunflag': rx_algrunflag,
'zcross': zcross,
'toploc': toploc
}
df = | pd.DataFrame(ds) | pandas.DataFrame |
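# Hedged sketch (not in the original file): how the QA conditions listed in the
# rhv() docstring could be applied once the per-beam DataFrame is assembled.
# Column names follow the 'ds' dict above. The rx_maxamp > 8*sd_corrected check
# is omitted because those datasets are not collected here, and the beam-level
# quality_flag ('quality') is added as a commonly used extra filter; thresholds
# should be verified against the GEDI L2A product documentation.
def _apply_l2a_quality_mask(df):
    mask = (
        (df['rx_quality'] == 1)
        & (df['quality'] == 1)
        & (df['rx_algrunflag'] == 1)
        & (df['zcross'] > 0)
        & (df['toploc'] > 0)
        & (df['sensitivity'] > 0)
        & (df['sensitivity'] <= 1)
    )
    return df[mask]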
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
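# Hypothetical helper (not used by the fixtures in this module): a minimal
# sketch of how a forecast frame with unbounded confidence intervals, like the
# one defined above, could be generated instead of spelled out literally. It
# assumes a daily time index and a linear fcst column, and relies on the
# module-level `pandas as pd` / `numpy as np` imports already used above.
def _make_unbounded_linear_fcst(start, n, fcst_start, fcst_step):
    return pd.DataFrame(
        {
            # n consecutive daily timestamps starting at `start`
            "time": pd.date_range(start=start, periods=n, freq="D"),
            # linear forecast: fcst_start, fcst_start + fcst_step, ...
            "fcst": fcst_start + fcst_step * np.arange(n),
            # unbounded confidence interval on both sides
            "fcst_lower": np.full(n, -np.inf),
            "fcst_upper": np.full(n, np.inf),
        }
    )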
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),