| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
import logging
import os
import sys
import pandas as pd
import pytest
import handy as hd
log: logging.Logger
@pytest.fixture
def setup_logging():
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
global log
log = logging.getLogger('handy test')
log.setLevel(logging.INFO)
return log
def test_nothing(setup_logging):
global log
# this is to show how to use logging with pycharm + pytest
# it will be printed if pytest is run with options `-p no:logging -s`
# Add them to "Additional Arguments" in your Run Configuration or
# Run Configuration Template for pytest
print('\nIgnore the messages from this method. This is a test.')
print('this is how to print to console, without logging')
log.warning('Just a test warning message. Ignore it.')
log.warning('This is how to print a warning with logging')
log.info('This is how to print an info with logging')
assert True, "dummy assertion"
def test_round_up():
assert hd.round_up(98) == 100
assert hd.round_up(55) == 60
assert hd.round_up(10) == 10
assert hd.round_up(345) == 400
def test_tidy_bins():
assert hd.tidy_bins([21,35,92], 4).tolist() == [0,25,50,75,100]
assert hd.tidy_bins([21,35,92]).tolist() == [0,10,20,30,40,50,60,70,80,90,100]
assert hd.tidy_bins([21,35,92], 5).tolist() == [0,20,40,60,80,100]
def test_to_datetime():
days = ['2021-04-05 00:00', # Mon
'2021-04-10 11:46', # Sat
'2021-04-11 23:59' # Sun
]
df = pd.DataFrame({'input': days})
df = hd.to_datetime(df, input_column='input', output_column='output')
assert df.output[2] == pd.to_datetime(days[2])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import _unittest as unittest
try:
import pandas
except ImportError:
pandas = None
from datatest._compatibility.collections.abc import Iterator
from datatest._utils import IterItems
from datatest._vendor.repeatingcontainer import RepeatingContainer
class TestRepeatingContainer(unittest.TestCase):
def test_init_sequence(self):
group = RepeatingContainer([1, 2, 3])
self.assertEqual(group._keys, ())
self.assertEqual(group._objs, (1, 2, 3))
def test_init_mapping(self):
data = {'a': 1, 'b': 2, 'c': 3}
group = RepeatingContainer(data)
self.assertEqual(group._keys, tuple(data.keys()))
self.assertEqual(group._objs, tuple(data.values()))
def test_init_iteritems(self):
keys = ('a', 'b', 'c')
values = (1, 2, 3)
group = RepeatingContainer(IterItems(zip(keys, values)))
self.assertEqual(group._keys, keys)
self.assertEqual(group._objs, values)
def test_init_exceptions(self):
with self.assertRaises(TypeError):
RepeatingContainer(123)
with self.assertRaises(ValueError):
RepeatingContainer('abc')
def test_iter_sequence(self):
group = RepeatingContainer([1, 2, 3])
self.assertIsInstance(iter(group), Iterator)
self.assertNotIsInstance(iter(group), IterItems)
self.assertEqual(list(group), [1, 2, 3])
def test_iter_mapping(self):
group = RepeatingContainer({'a': 1, 'b': 2, 'c': 3})
self.assertIsInstance(iter(group), IterItems)
self.assertEqual(set(group), set([('a', 1), ('b', 2), ('c', 3)]))
def test_repr(self):
group = RepeatingContainer([1, 2, 3])
self.assertEqual(repr(group), 'RepeatingContainer([1, 2, 3])')
group = RepeatingContainer([1, 2])
group._keys = ['a', 'b']
self.assertEqual(repr(group), "RepeatingContainer({'a': 1, 'b': 2})")
def test_repr_long(self):
# Get longest element repr that should fit on one line.
single_line_max = 79 - len(RepeatingContainer.__name__) - len("([''])")
# Exactly up-to single-line limit.
value = 'a' * single_line_max
group = RepeatingContainer([value])
self.assertEqual(len(repr(group)), 79)
self.assertEqual(
repr(group),
"RepeatingContainer(['{0}'])".format(value),
)
# Multi-line repr (one char over single-line limit)
value = 'a' * (single_line_max + 1)
group = RepeatingContainer([value])
self.assertEqual(len(repr(group)), 84)
self.assertEqual(
repr(group),
"RepeatingContainer([\n '{0}'\n])".format(value),
)
def test_getattr(self):
class ExampleClass(object):
attr = 123
group = RepeatingContainer([ExampleClass(), ExampleClass()])
group = group.attr
self.assertIsInstance(group, RepeatingContainer)
self.assertEqual(group._objs, (123, 123))
def test_compatible_container(self):
# Test RepeatingContainer of list items.
group = RepeatingContainer([2, 4])
self.assertTrue(
group._compatible_container(RepeatingContainer([5, 6])),
msg='is RepeatingContainer and _objs length matches',
)
self.assertFalse(
group._compatible_container(1),
msg='non-RepeatingContainer values are never compatible',
)
self.assertFalse(
group._compatible_container(RepeatingContainer([5, 6, 7])),
msg='not compatible when _objs length does not match',
)
self.assertFalse(
group._compatible_container(RepeatingContainer({'foo': 5, 'bar': 6})),
msg='not compatible if keys are given but original has no keys',
)
# Test RepeatingContainer of dict items.
group = RepeatingContainer({'foo': 2, 'bar': 4})
self.assertTrue(
group._compatible_container(RepeatingContainer({'foo': 5, 'bar': 6})),
msg='is RepeatingContainer and _keys match',
)
self.assertFalse(
group._compatible_container(RepeatingContainer({'qux': 5, 'quux': 6})),
msg='not compatible if keys do not match',
)
def test_normalize_value(self):
group = RepeatingContainer([2, 4])
result = group._normalize_value(5)
self.assertEqual(
result,
(5, 5),
msg='value is expanded to match number of _objs',
)
result = group._normalize_value(RepeatingContainer([5, 6]))
self.assertEqual(
result,
(5, 6),
msg='compatible RepeatingContainers are unwrapped rather than expanded',
)
other = RepeatingContainer([5, 6, 7])
result = group._normalize_value(other)
self.assertIsInstance(
result,
tuple,
msg='incompatible RepeatingContainers are expanded like other values',
)
self.assertEqual(len(result), 2)
equals_other = super(other.__class__, other).__eq__
self.assertTrue(equals_other(result[0]))
self.assertTrue(equals_other(result[1]))
group = RepeatingContainer([2, 4])
group._keys = ['foo', 'bar']
other = RepeatingContainer([8, 6])
other._keys = ['bar', 'foo'] # <- keys in different order
result = group._normalize_value(other)
self.assertEqual(
result,
(6, 8), # <- reordered to match `group`
msg='result order should match key names, not _obj position',
)
def test_expand_args_kwds(self):
argsgroup = RepeatingContainer([2, 4])
kwdsgroup = RepeatingContainer([2, 4])
kwdsgroup._keys = ['foo', 'bar']
# Unwrap RepeatingContainer.
result = argsgroup._expand_args_kwds(RepeatingContainer([5, 6]))
expected = [
((5,), {}),
((6,), {}),
]
self.assertEqual(result, expected)
# Expand int and unwrap RepeatingContainer.
result = argsgroup._expand_args_kwds(1, RepeatingContainer([5, 6]))
expected = [
((1, 5), {}),
((1, 6), {}),
]
self.assertEqual(result, expected)
# Unwrap two RepeatingContainer.
result = argsgroup._expand_args_kwds(
x=RepeatingContainer([5, 6]),
y=RepeatingContainer([7, 9]),
)
expected = [
((), {'x': 5, 'y': 7}),
((), {'x': 6, 'y': 9}),
]
self.assertEqual(result, expected)
# Kwdsgroup expansion.
kwdgrp2 = RepeatingContainer([5, 6])
kwdgrp2._keys = ['foo', 'bar']
# Unwrap keyed RepeatingContainer.
result = kwdsgroup._expand_args_kwds(kwdgrp2)
expected = [
((5,), {}),
((6,), {}),
]
self.assertEqual(result, expected)
# Unwrap keyed RepeatingContainer with keys in different order.
kwdgrp_reverse = RepeatingContainer([6, 5])
kwdgrp_reverse._keys = ['bar', 'foo']
result = kwdsgroup._expand_args_kwds(kwdgrp_reverse)
expected = [
((5,), {}),
((6,), {}),
]
self.assertEqual(result, expected)
# Expand int and unwrap keyed RepeatingContainer.
result = kwdsgroup._expand_args_kwds(1, kwdgrp2)
expected = [
((1, 5), {}),
((1, 6), {}),
]
self.assertEqual(result, expected)
# Sanity-check/quick integration test (all combinations).
result = kwdsgroup._expand_args_kwds('a', RepeatingContainer({'foo': 'b', 'bar': 'c'}),
x=1, y=RepeatingContainer({'bar': 4, 'foo': 2}))
expected = [
(('a', 'b'), {'x': 1, 'y': 2}),
(('a', 'c'), {'x': 1, 'y': 4}),
]
self.assertEqual(result, expected)
def test__getattr__(self):
number = complex(2, 3)
group = RepeatingContainer([number, number])
group = group.imag # <- Gets `imag` attribute.
self.assertEqual(group._objs, (3, 3))
def test__call__(self):
group = RepeatingContainer(['foo', 'bar'])
result = group.upper()
self.assertIsInstance(result, RepeatingContainer)
self.assertEqual(result._objs, ('FOO', 'BAR'))
def test_added_special_names(self):
"""Test some of the methods that are programmatically added to
RepeatingContainer by the _setup_RepeatingContainer_special_names() function.
"""
group = RepeatingContainer(['abc', 'def'])
result = group + 'xxx' # <- __add__()
self.assertIsInstance(result, RepeatingContainer)
self.assertEqual(result._objs, ('abcxxx', 'defxxx'))
result = group[:2] # <- __getitem__()
self.assertIsInstance(result, RepeatingContainer)
self.assertEqual(result._objs, ('ab', 'de'))
def test_added_reflected_special_names(self):
result = 100 + RepeatingContainer([1, 2]) # <- __radd__()
self.assertIsInstance(result, RepeatingContainer)
self.assertEqual(result._objs, (101, 102))
# When the reflected method is missing, the unreflected method of
# the *other* value is re-called on the RepeatingContainer's contents.
# The following test case does this with strings. Since 'str' does not
# have an __radd__() method, this calls the unreflected __add__()
# of the original string.
result = 'xxx' + RepeatingContainer(['abc', 'def']) # <- unreflected __add__()
self.assertIsInstance(result, RepeatingContainer)
self.assertEqual(result._objs, ('xxxabc', 'xxxdef'))
def test_repeatingcontainer_argument_handling(self):
# Unwrapping RepeatingContainer args with __add__().
group_of_ints1 = RepeatingContainer([50, 60])
group_of_ints2 = RepeatingContainer([5, 10])
group = group_of_ints1 + group_of_ints2
self.assertEqual(group._objs, (55, 70))
# Unwrapping RepeatingContainer args with __getitem__().
group_of_indexes = RepeatingContainer([0, 1])
group_of_strings = RepeatingContainer(['abc', 'abc'])
group = group_of_strings[group_of_indexes]
self.assertEqual(group._objs, ('a', 'b'))
class TestRepeatingContainerBaseMethods(unittest.TestCase):
def setUp(self):
self.group1 = RepeatingContainer(['foo', 'bar'])
self.group2 = RepeatingContainer(['foo', 'baz'])
def test__eq__(self):
# Comparing contents of RepeatingContainer (default behavior).
result = (self.group1 == self.group2) # <- Call to __eq__().
self.assertIsInstance(result, RepeatingContainer)
self.assertEqual(tuple(result), (True, False))
# Comparing RepeatingContainer objects themselves.
result = super(RepeatingContainer, self.group1).__eq__(self.group1)
self.assertIs(result, True)
result = super(RepeatingContainer, self.group1).__eq__(self.group2)
self.assertIs(result, False)
def test__ne__(self):
# Comparing contents of RepeatingContainer (default behavior).
result = (self.group1 != self.group2) # <- Call to __ne__().
self.assertIsInstance(result, RepeatingContainer)
self.assertEqual(tuple(result), (False, True))
# Comparing RepeatingContainer objects themselves.
result = super(RepeatingContainer, self.group1).__ne__(self.group2)
self.assertIs(result, True)
result = super(RepeatingContainer, self.group1).__ne__(self.group1)
self.assertIs(result, False)
class TestNestedExample(unittest.TestCase):
"""Quick integration test using nested RepeatingContainers."""
def setUp(self):
self.group = RepeatingContainer([
RepeatingContainer({'foo': 'abc', 'bar': 'def'}),
'ghi',
])
def test_method(self):
result1, result2 = self.group.upper()
self.assertEqual(dict(result1), {'foo': 'ABC', 'bar': 'DEF'})
self.assertEqual(result2, 'GHI')
def test_magic_method(self):
result1, result2 = self.group + 'XYZ'
self.assertEqual(dict(result1), {'foo': 'abcXYZ', 'bar': 'defXYZ'})
self.assertEqual(result2, 'ghiXYZ')
def test_unreflected_magic_method(self):
result1, result2 = 'XYZ' + self.group
self.assertEqual(dict(result1), {'foo': 'XYZabc', 'bar': 'XYZdef'})
self.assertEqual(result2, 'XYZghi')
def test_deeply_nested(self):
group = RepeatingContainer([
RepeatingContainer([
RepeatingContainer(['abc', 'def']),
RepeatingContainer(['abc', 'def']),
]),
RepeatingContainer([
RepeatingContainer(['abc', 'def']),
RepeatingContainer(['abc', 'def'])
])
])
result = group + ('xxx' + group.upper()) # <- Operate on RepeatingContainer.
# Unpack various nested values.
subresult1, subresult2 = result
subresult1a, subresult1b = subresult1
subresult2a, subresult2b = subresult2
self.assertEqual(subresult1a._objs, ('abcxxxABC', 'defxxxDEF'))
self.assertEqual(subresult1b._objs, ('abcxxxABC', 'defxxxDEF'))
self.assertEqual(subresult2a._objs, ('abcxxxABC', 'defxxxDEF'))
self.assertEqual(subresult2b._objs, ('abcxxxABC', 'defxxxDEF'))
@unittest.skipUnless(pandas, 'requires pandas')
class TestPandasExample(unittest.TestCase):
"""Quick integration test using a RepeatingContainer of DataFrames."""
def setUp(self):
data = pandas.DataFrame({
'A': ('x', 'x', 'y', 'y', 'z', 'z'),
'B': ('foo', 'foo', 'foo', 'bar', 'bar', 'bar'),
'C': (20, 30, 10, 20, 10, 10),
})
self.group = RepeatingContainer([data, data])
def test_summed_values(self):
result = self.group['C'].sum()
self.assertEqual(tuple(result), (100, 100))
def test_selected_grouped_summed_values(self):
result = self.group[['A', 'C']].groupby('A').sum()
expected = pandas.DataFrame(
data={'C': (50, 30, 20)},
index=pandas.Index(['x', 'y', 'z'], name='A'),
)
df1, df2 = result # Unpack results.
pandas.testing.assert_frame_equal(df1, expected)
pandas.testing.assert_frame_equal(df2, expected)
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
import plotly.express as px
import numpy as np
import pandas as pd
# adding a CSS stylesheet
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# initialise the Dash app
app = dash.Dash(__name__, external_stylesheets = external_stylesheets)
server = app.server
# now read the data into a DataFrame
df = pd.read_csv('restaurants_zomato.csv', encoding='ISO-8859-1')
from flask import Flask, render_template,request, url_for, redirect
import plotly
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import json
import functions
with open('data/users.json', 'r', errors='ignore') as f:
data = json.load(f)
users = pd.DataFrame(data)
with open('data/problems.json', 'r', errors='ignore') as f:
data = json.load(f)
problems = pd.DataFrame(data)
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from src.policies.single_policy_functions import _interpolate_activity_level
from src.policies.single_policy_functions import reduce_recurrent_model
from src.policies.single_policy_functions import reduce_work_model
from src.policies.single_policy_functions import reopen_other_model
from src.policies.single_policy_functions import shut_down_model
@pytest.fixture
def fake_states():
states = pd.DataFrame(index=np.arange(10))
states["state"] = ["Bayern", "Berlin"] * 5
# date at which schools are open in Berlin but closed in Bavaria
# date with uneven week number, i.e. where group a attends school
states["date"] = pd.Timestamp("2020-04-23")
states["school_group_a"] = [0, 1] * 5
states["occupation"] = pd.Categorical(
["school"] * 8 + ["preschool_teacher", "school_teacher"]
)
states["educ_worker"] = [False] * 8 + [True] * 2
states["age"] = np.arange(10)
return states
def test_shut_down_model_non_recurrent():
contacts = pd.Series(np.arange(3))
states = pd.DataFrame(index=["a", "b", "c"])
calculated = shut_down_model(states, contacts, 123, is_recurrent=False)
expected = pd.Series(0, index=["a", "b", "c"])
import copy
import csv
import gzip
import logging
import os
import re
import subprocess
import tempfile
from collections import defaultdict
from multiprocessing import Pool
from pathlib import Path
import numpy as np
import pandas as pd
import tqdm
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from centreseq.bin.core.accessories import run_subprocess
main_log = logging.getLogger('main_log')
def read_seqs(infile, filter_list=None):
"""
Reads up sequences from a path to a fasta file
:param infile: path to fasta file
:param filter_list: Strings that should be in the description of the sequences
:return: a list of strings
"""
r = []
f = open_possible_gzip(infile)
for seq in SeqIO.parse(f, "fasta"):
if filter_list is not None:
assert isinstance(filter_list, list)
if any([x in seq.description for x in filter_list]):
r.append(seq)
else:
r.append(seq)
f.close()
return r
def faster_fasta_searching(infile, filter_list=[]):
"""
Loads up a fasta file into a list. Much faster than using SeqIO
:param infile: fasta infile
:param filter_list: a list of sequence ids you want to keep. If you want to keep everything pass []
:return:
"""
skip = True
gene_name = ""
gene_description = ""
seq = ""
seqs_all = []
f = open_possible_gzip(infile)
for line in f:
if line[0] == ">":
# Resolve last gene
if (filter_list == []) | (gene_name in filter_list):
seqs_all.append(SeqRecord(Seq(seq), id=gene_name, name=gene_name, description=gene_description))
# Initialize new gene
seq = ""
gene_name = line.split(" ")[0].lstrip(">")
gene_description = line.rstrip("\n")
# If we want everything
if filter_list == []:
skip = False
else:
# Keep this gene
if gene_name in filter_list:
skip = False
else:
skip = True
elif skip:
continue
else:
# Add sequence to the string
seq += line.rstrip("\n")
f.close()
# Resolve the final gene
if (filter_list == []) | (gene_name in filter_list):
seqs_all.append(SeqRecord(Seq(seq), id=gene_name, name=gene_name, description=gene_description))
return seqs_all
def open_possible_gzip(infile, flags="rt"):
"""
Opens a file handle for a gzipped or non-zipped file
:param infile: Path to file
:param flags:
:return: file handle
"""
infile = str(infile)
if re.search("\.gz$", infile):
f = gzip.open(infile, flags)
else:
f = open(infile, flags)
return f
def write_seqs_to_file(seq_list, outfile_seq=None):
"""
Write sequences to file. If no file is given, the sequences are written to a temporary file
:param seq_list: a list of sequence objects
:param outfile_seq: outfile path
:return: the name of the output file
"""
if outfile_seq is None:
outfile_seq = tempfile.NamedTemporaryFile(suffix=".fasta", delete=False).name
with open(outfile_seq, "w") as f:
SeqIO.write(seq_list, f, "fasta")
return outfile_seq
def run_mmseqs(seqs1, seqs2):
"""
Equivalent to blast_seqs() but uses mmseqs and thus is much faster
:param seqs1: list of sequences to compare
:param seqs2: list of sequences to be compared against
:return:
"""
query_fasta = write_seqs_to_file(seqs1)
target_fasta = write_seqs_to_file(seqs2)
outfile = Path(tempfile.gettempdir()) / (next(tempfile._get_candidate_names()) + ".dat")
tmpdir = tempfile.TemporaryDirectory()
# This needs at least mmseqs v8
result = subprocess.run(["mmseqs"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# m = re.search("MMseqs2 Version: ([0-9])\..+", result.stdout.decode('utf-8'))
# assert m, "Can't read your mmseqs version, requires at least version 8"
# assert int(m.group(1)) >= 8, "Require mmseqs at least version 8"
cmd = f"mmseqs easy-search {query_fasta} {target_fasta} {outfile} {tmpdir.name} --threads 1 --split-memory-limit {max_mem_use} --search-type 3"
run_subprocess(cmd, get_stdout=True)
with open(outfile) as f:
mmseqs_output = f.read().rstrip("\n")
# I've renamed these for consistency with blast output
columns = "qseqid,sseqid,pident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bitscore".split(",")
return mmseqs_output, columns
def load_pangenome_list(pangenome_list: list):
"""
Takes a putative core genome list and loads it up.
Checks whether there are any paralogs and, therefore, whether we need to run the algorithm
:param pangenome_list: a list of gene names
"""
# Default is no change
update = False
# Check if we have any paralogs, and thus, need to update
for item in pangenome_list:
if pd.isnull(item):
# coding: utf-8
# ### Import
# In[1]:
from bs4 import BeautifulSoup
import requests
import numpy as np
import pandas as pd
import xgboost
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import *
from IPython.core.display import Image
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.tree import export_graphviz
import io
from sklearn.preprocessing import Imputer
import pydot
from sklearn import preprocessing
import lightgbm as lgb
from scipy.stats import mode
import re
from datetime import datetime
from lightgbm import plot_importance
import warnings
warnings.filterwarnings('ignore')
# ---
# ### Date read
# In[12]:
age_gender_bkts = pd.read_csv("age_gender_bkts.csv")
countries = pd.read_csv("countries.csv")
sessions = pd.read_csv("sessions.csv")
test_users = pd.read_csv("test_users.csv")
train_users_2 = pd.read_csv("train_users_2.csv")
sample_submission_NDF = pd.read_csv("sample_submission_NDF.csv")
merged_sessions = pd.read_csv("merged_sessions.csv")
# ---
# ### Date setting - Base1
# In[13]:
def pre_age_set_data(train_users_2, test_users):
check = pd.concat([train_users_2, test_users], ignore_index=True)
check["first_affiliate_tracked"] = check["first_affiliate_tracked"].replace(np.nan, "untracked")
check["date_account_created"] = pd.to_datetime(check["date_account_created"], format = "%Y-%m-%d")
check["timestamp_first_active"] = pd.to_datetime(check["timestamp_first_active"], format="%Y%m%d%H%M%S")
s_lag = check["timestamp_first_active"] - check["date_account_created"]
check["lag_days"] = s_lag.apply(lambda x : -1 * x.days)
check["lag_seconds"] = s_lag.apply(lambda x : x.seconds)
s_all_check = (check['age'] < 120) & (check['gender'] != '-unknown-')
check['faithless_sign'] = s_all_check.apply(lambda x : 0 if x == True else 1)
pre_age = check.drop("date_first_booking",axis = 1)
pre_age['date_account_created_y'] = pre_age["date_account_created"].apply(lambda x : x.year)
pre_age['date_account_created_m'] = pre_age["date_account_created"].apply(lambda x : x.month)
pre_age['date_account_created_d'] = pre_age["date_account_created"].apply(lambda x : x.day)
pre_age['timestamp_first_active_y'] = pre_age["timestamp_first_active"].apply(lambda x : x.year)
pre_age['timestamp_first_active_m'] = pre_age["timestamp_first_active"].apply(lambda x : x.month)
pre_age['timestamp_first_active_d'] = pre_age["timestamp_first_active"].apply(lambda x : x.day)
pre_age = pre_age.drop("date_account_created" , axis=1)
pre_age = pre_age.drop("timestamp_first_active" , axis=1)
return check, pre_age
# ---
# ### Date setting - Base2
# In[14]:
def pre_age_predict_data(pre_age):
pre_age['age'] = pre_age['age'].fillna(-1)
pre_age_sub = pre_age.filter(items = ['age', 'country_destination','id'])
pre_age_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',
'first_affiliate_tracked', 'first_browser', 'first_device_type',
'language', 'signup_app', 'signup_flow',
'signup_method', 'date_account_created_y', 'date_account_created_m',
'date_account_created_d', 'timestamp_first_active_y',
'timestamp_first_active_m', 'timestamp_first_active_d',"lag_days","lag_seconds",
"faithless_sign"])
pre_age_dum[['date_account_created_y', 'date_account_created_m', 'date_account_created_d', 'timestamp_first_active_y','timestamp_first_active_m', 'timestamp_first_active_d']] = pre_age_dum[['date_account_created_y', 'date_account_created_m', 'date_account_created_d', 'timestamp_first_active_y', 'timestamp_first_active_m', 'timestamp_first_active_d']].astype(str)
pre_age_dum = pd.get_dummies(pre_age_dum)
pre_age_dum_con = pd.concat([pre_age_dum, pre_age_sub], axis=1)
pre_age_dum_con["age"] = pre_age_dum_con["age"].replace(-1, np.nan)
pre_age_mission = pre_age_dum_con[pre_age_dum_con["age"].isna()].reset_index()
pre_age_train = pre_age_dum_con[pre_age_dum_con["age"].notna()].reset_index()
pre_age_mission_test = pre_age_mission.drop("index", axis=1)
pre_age_train_test = pre_age_train.drop("index", axis=1)
pre_age_mission_test_drop = pre_age_mission_test.drop(['id', 'age', 'country_destination'], axis=1)
pre_age_train_test_drop = pre_age_train_test.drop(['id', 'age', 'country_destination'], axis=1)
return pre_age_mission_test, pre_age_train_test, pre_age_mission, pre_age_train, pre_age_mission_test_drop, pre_age_train_test_drop
# In[15]:
def pre_age_predict_data_cat(pre_age_train):
bins = [0, 15, 25, 35, 60, 9999]
labels = ["미성년자", "청년", "중년", "장년", "노년"]
cats = pd.cut(pre_age_train['age'], bins, labels=labels)
cats = pd.DataFrame(cats)
return cats
# ---
# ### Predict gender data setting - Only gender
# In[16]:
def add_gender(pre_age):
pred_gen_data = pd.read_csv("model_gen_lgb.csv")
pre_gen_sub = pre_age.filter(items = ['age', 'country_destination', 'id', 'gender'])
pre_gen_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',
'first_affiliate_tracked', 'first_browser', 'first_device_type',
'language', 'signup_app', 'signup_flow',
'signup_method', 'date_account_created_y', 'date_account_created_m',
'date_account_created_d', 'timestamp_first_active_y',
'timestamp_first_active_m', 'timestamp_first_active_d',"lag_days","lag_seconds",
"faithless_sign"])
pre_gen_dum = pd.get_dummies(pre_gen_dum)
pre_gen_dum_con = pd.concat([pre_gen_dum, pre_gen_sub], axis=1)
pre_gen_dum_con["gender"] = pre_gen_dum_con["gender"].replace(['-unknown-', 'OTHER'], np.nan)
pre_gen_mission = pre_gen_dum_con[pre_gen_dum_con["gender"].isna()].reset_index()
pre_gen_train = pre_gen_dum_con[pre_gen_dum_con["gender"].notna()].reset_index()
pre_gen_mission_test = pre_gen_mission.drop("index", axis=1)
pre_gen_train_test = pre_gen_train.drop("index", axis=1)
pre_gen_mission_test_drop = pre_gen_mission_test.drop(['id', 'age', 'country_destination', "gender"], axis=1)
pre_gen_train_test_drop = pre_gen_train_test.drop(['id', 'age', 'country_destination', "gender"], axis=1)
pre_gen_mission_test_la = pd.concat([pre_gen_mission_test, pred_gen_data], axis=1)
pre_gen_mission_test_la = pre_gen_mission_test_la.drop("gender", axis=1)
pre_gen_mission_test_la = pre_gen_mission_test_la.rename(columns={"0": 'gender'})
last_gen_add = pd.concat([pre_gen_mission_test_la, pre_gen_train_test])
last_gen_add = last_gen_add.filter(items = ["id",'gender'])
return last_gen_add
# ---
# ### Holiday, Weekend, Day of week data setting - Only Holiday
# In[17]:
def holiday(train_users_2, test_users):
def get_holidays(year):
response = requests.get("https://www.timeanddate.com/calendar/custom.html?year="+str(year)+" &country=1&cols=3&df=1&hol=25")
dom = BeautifulSoup(response.content, "html.parser")
trs = dom.select("table.cht.lpad tr")
df = pd.DataFrame(columns=["date", "holiday"])
for tr in trs:
datestr = tr.select_one("td:nth-of-type(1)").text
date = datetime.strptime("{} {}".format(year, datestr), '%Y %b %d')
holiday = tr.select_one("td:nth-of-type(2)").text
df.loc[len(df)] = {"date" : date, "holiday": 1}
return df
holiday_ls = []
for year in range(2009, 2015):
df = get_holidays(year)
holiday_ls.append(df)
holiday_df = pd.concat(holiday_ls)
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
plt.rcParams['font.size'] = 6
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
graphs_path = root_path+'/boundary_effect/graph/'
if not os.path.exists(graphs_path):
os.makedirs(graphs_path)
time = pd.read_csv(root_path+'/time_series/MonthlyRunoffWeiRiver.csv')['Time']
time = time.values
time = [datetime.strptime(t,'%Y/%m') for t in time]
time = [t.strftime('%b %Y') for t in time]
# print(time)
# CHECK 1: is EEMD shift-invariant?
# If yes, any shifted copy of an IMF from a EEMD decomposition, similar to a
# shifted copy of the original time series, should be maintained.
# For example, given the sunspot time series x (of length 792) we can
# generate a 1-step advanced copy of the original time series as follows:
# x0=(1:791)
# x1=(2:792) this is a 1-step advanced version of x0
# Obviously, shift-invariancy is preserved between x0 and x1 since
# x0(2:791)=x1(1:790)
# For shift-invariancy to be preserved for EEMD, we would observe, for
# example, that the EEMD IMF1 components for x0 (imf1 of x0) and x1 (imf1 of
# x1) should be exact copies of one another, advanced by a single step.
# i.e., x0_imf(2:791,1) should equal x1_imf(1:790,1) if shift-invariancy
# is preserved.
# As shown below for EEMD, x0_imf(2:791,1) is basically equal to
# x1_imf(1:790,1) except for a few samples close to the beginning and
# end of x0 and x1. Interestingly, we see a low level of error close to the
# beginning of the time series and a high level of error close to the end of
# the time series, which is of high importance in operational forecasting tasks.
# The errors along the middle range are zero, indicating EEMD is
# shift-invariant.
# We argue that the errors close to the boundaries are
# caused by the boundary effect, which is exactly the problem this study is
# designed to solve.
# CHECK 2: The impact of appending data points to a time series and then
# performing EEMD, analogous to the case in operational forecasting when new
# data becomes available and an updated forecast is made using the newly
# arrived data.
# Ideally, for forecasting situations, when new data is appended to a time
# series and some preprocessing is performed, it should not have an impact
# on previous measurements of the pre-processed time series.
# For example, if IMF1_1:N represents the IMF1, which has N total
# measurements and was derived by applying EEMD to x_1:N, then we would expect
# that when we perform EEMD when x is appended with another measurement,
# i.e., x_1:N+1, resulting in IMF1_1:N+1 that the first 1:N measurements in
# IMF1_1:N+1 are equal to IMF1_1:N. In other words,
# IMF1_1:N+1[1:N]=IMF1_1:N[1:N].
# We see that this is not the case. Appending an additional observation to the
# time series results in the updated EEMD components being entirely
# different from the original (not yet updated) EEMD components.
# Interestingly, we see a high level of error at the boundaries of the time
# series, which is of high importance in operational forecasting tasks.
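# --- Illustrative sketch (not part of the original script) -------------------
# The shift-invariance check described above was pre-computed and saved to the
# CSV files loaded below. For reference, a minimal sketch of how such a check
# could be reproduced is given here. It assumes the PyEMD package is installed
# and exposes the EEMD class with eemd() and noise_seed() methods; treat this
# as an assumption, not as the code that actually generated the CSVs.
def shift_invariance_check_sketch(x, seed=12345):
    """Decompose a series and its 1-step advanced copy, return the IMF1 error."""
    import numpy as np
    from PyEMD import EEMD  # assumed dependency
    x0 = np.asarray(x[:-1], dtype=float)  # x0 = x(1:N-1)
    x1 = np.asarray(x[1:], dtype=float)   # x1 = x(2:N), a 1-step advanced copy
    eemd = EEMD()
    eemd.noise_seed(seed)                 # fix ensemble noise so the two runs are comparable
    imf1_x0 = eemd.eemd(x0)[0]            # first IMF of x0
    eemd.noise_seed(seed)
    imf1_x1 = eemd.eemd(x1)[0]            # first IMF of x1
    # With perfect shift-invariance the overlapping samples would be identical.
    return imf1_x0[1:] - imf1_x1[:-1]
# ------------------------------------------------------------------------------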
x0_imf = pd.read_csv(root_path+'/boundary_effect/eemd-decompositions-huaxian/x0_imf.csv')
x1_imf = pd.read_csv(root_path+'/boundary_effect/eemd-decompositions-huaxian/x1_imf.csv')
x_1_552_imf = pd.read_csv(root_path+"/boundary_effect/eemd-decompositions-huaxian/x_1_552_imf.csv")
x_1_791_imf = pd.read_csv(root_path+'/boundary_effect/eemd-decompositions-huaxian/x_1_791_imf.csv')
x_1_792_imf = pd.read_csv(root_path+'/boundary_effect/eemd-decompositions-huaxian/x_1_792_imf.csv')
x0_imf1_2_791 = x0_imf['IMF1'][1:790]
x0_imf1_2_791 = x0_imf1_2_791.reset_index(drop=True)
x1_imf1_1_790 = x1_imf['IMF1'][0:789]
x1_imf1_1_790 = x1_imf1_1_790.reset_index(drop=True)
print(x0_imf1_2_791)
print(x1_imf1_1_790)
err = x0_imf1_2_791-x1_imf1_1_790
# err_df = pd.DataFrame(err.values,columns=['err'])
print(err)
err.to_csv(root_path+'/results_analysis/results/shift_variance_err.csv')
x_1_552_imf1 = x_1_552_imf['IMF1']
x_1_791_imf1 = x_1_791_imf['IMF1']
x_1_792_imf1 = x_1_792_imf['IMF1']
err_append_one = x_1_792_imf1[0:790]-x_1_791_imf1[0:790]
err_append_several = x_1_792_imf1[0:551]-x_1_552_imf1[0:551]
err_append_one_df = pd.DataFrame(err_append_one,columns=['err'])
err_append_several_df = pd.DataFrame(err_append_several,columns=['err'])
print(err_append_one_df)
print(err_append_several_df)
err_append_one.to_csv(root_path+'/results_analysis/results/err_append_one.csv')
err_append_several.to_csv(root_path+'/results_analysis/results/err_append_several.csv')
xx = -6
aceg_y = 15
bdf_y = 2.4
y_min = -15
y_max = 20
ye_min = -1.3
ye_max = 3.1
plt.figure(figsize=(7.48,6))
plt.subplot(4,2,1)
plt.text(xx,aceg_y,'(a)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(x0_imf1_2_791,c='b',label=r'$IMF_{1}(2:791)$ of $x_{0}$')
plt.plot(x1_imf1_1_790,c='g',label=r'$IMF_{1}(1:790)$ of $x_{1}$')
plt.xlabel('Time (From '+time[1]+' to '+time[790]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(y_min,y_max)
plt.legend(ncol=2)
plt.subplot(4,2,2)
plt.text(xx,bdf_y,'(b)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
shift_var=plt.plot(err,'o',markerfacecolor='w',markeredgecolor='r',markersize=4.5,
label=R'''Error between $IMF_{1}(2:791)$
of $x_{0}$ and $IMF_{1}(1:790)$ of $x_{1}$''')
plt.xlabel('Time (From '+time[1]+' to '+time[790]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(ye_min,ye_max)
plt.legend()
plt.subplot(4,2,3)
plt.text(xx,aceg_y,'(c)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(x_1_791_imf1,c='b',label=r'$IMF_{1}$ of $x_{1-791}$')
plt.plot(x_1_792_imf1,c='g',label=r'$IMF_{1}$ of $x_{1-792}$')
plt.xlabel('Time (From '+time[0]+' to '+time[791]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(y_min,y_max)
plt.legend(ncol=2)
plt.subplot(4,2,4)
plt.text(xx,bdf_y,'(d)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(err_append_one,'o',markerfacecolor='w',markeredgecolor='r',markersize=4.5,
label=R'''Error between $IMF_{1}(1:791)$ of
$x_{1-791}$ and $IMF_{1}(1:791)$ of $x_{1-792}$''')
plt.xlabel('Time (From '+time[0]+' to '+time[790]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(ye_min,ye_max)
plt.legend(loc=9)
plt.subplot(4,2,5)
plt.text(xx,aceg_y,'(e)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(x_1_552_imf1,c='b',label=r'$IMF_{1}$ of $x_{1-552}$')
plt.plot(x_1_792_imf1,c='g',label=r'$IMF_{1}$ of $x_{1-792}$')
plt.xlabel('Time (From '+time[0]+' to '+time[791]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(y_min,y_max)
plt.legend(ncol=2)
plt.subplot(4,2,6)
plt.text(xx,bdf_y,'(f)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(err_append_several,'o',markerfacecolor='w',markeredgecolor='r',markersize=4.5,
label=R'''Error between $IMF_{1}(1:552)$ of
$x_{1-552}$ and $IMF_{1}(1:552)$ of $x_{1-792}$''')
plt.xlabel('Time (From '+time[0]+' to '+time[551]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(ye_min,ye_max)
plt.legend(loc=9,ncol=2)
eemd_train = pd.read_csv(root_path+"/Huaxian_eemd/data/EEMD_TRAIN.csv")
# coding: utf-8
# # Dataset Statistics for Compound Gene Sentences
# This notebook is designed to show statistics on the data extracted from pubmed. The following cells below here are needed to set up the environment.
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('matplotlib', 'inline')
from collections import Counter
from itertools import product
import os
import pickle
import sys
sys.path.append(os.path.abspath('../../../modules'))
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
import seaborn as sns
from tqdm import tqdm_notebook
sns.set(rc={'figure.figsize':(12,6), "font.size":17})
# In[2]:
#Set up the environment
username = "danich1"
password = "<PASSWORD>"
dbname = "pubmeddb"
#Path subject to change for different os
database_str = "postgresql+psycopg2://{}:{}@/{}?host=/var/run/postgresql".format(username, password, dbname)
os.environ['SNORKELDB'] = database_str
from snorkel import SnorkelSession
session = SnorkelSession()
# In[3]:
from snorkel.models import candidate_subclass, Candidate
CompoundGene = candidate_subclass('CompoundGene', ['Compound', 'Gene'])
# In[4]:
from utils.notebook_utils.dataframe_helper import write_candidates_to_excel, make_sentence_df
# # Read Full Sentence Table
# The cells below will read every sentence that contains a gene and compound entity from the sentence table in our postgres database. For time's sake, the majority of the data has already been processed and saved as the files mentioned below.
# In[ ]:
sql= '''
select id as sentence_id, text, (
char_length(regexp_replace(CAST(words AS TEXT), '[\u0080-\u00ff]', '', 'g')) -
char_length(regexp_replace(regexp_replace(CAST(words AS TEXT), '[\u0080-\u00ff]', '', 'g'), ',', '','g'))
) as sen_length, entity_types
from sentence
where entity_types::text like '%%Compound%%' or entity_types::text like '%%Gene%%';
'''
sentence_df = pd.read_sql(sql, database_str)
sentence_df.head(2)
# In[ ]:
entity_data = []
tagging_error_ids = set({})
#skip tagging error
skip_tag_error = False
for index, row in tqdm_notebook(sentence_df.iterrows()):
#create dictionay for mapping entity types
entity_mapper = {"sentence_id": row['sentence_id']}
#Keep track of previous entity
previous_entity = 'o'
#For all entities in a given sentence decide what is tagged
for entity in row['entity_types']:
entity = entity.lower()
#Non-O tag
if entity != 'o' and previous_entity =='o':
#If entity not seen before instanciate it
if entity not in entity_mapper:
entity_mapper[entity] =0
entity_mapper[entity] += 1
# If previous tag was non-O and the current tag does not equal previous
# Then tagging error, e.g. Disease, Gene, Disease instead of Disease, O, Disease
elif entity != previous_entity and entity != 'o':
tagging_error_ids.add(row['sentence_id'])
skip_tag_error = True
break
previous_entity = entity
# Do not add errors to dataframe
# They will be thrown out
if not skip_tag_error:
entity_data.append(entity_mapper)
skip_tag_error=False
entity_stats_df = pd.DataFrame.from_dict(entity_data).fillna(0)
entity_stats_df.head(2)
# In[ ]:
tagging_error_df = pd.Series(sorted(list(tagging_error_ids)))
tagging_error_df.to_csv("results/tagging_error_ids.tsv.xz", sep="\t", index=False, compression="xz")
tagging_error_df.head(2)
# In[ ]:
print(
"Total Number of IOB Tagging Errors: {}. Percentage of sentences affected: {:.2f}".format(
tagging_error_df.shape[0],
100*tagging_error_df.shape[0]/sentence_df.shape[0]
)
)
# In[ ]:
header = ["sentence_id", "text", "sen_length"]
sentence_df[header].to_csv("results/sentence_stats.tsv.xz", sep="\t", index=False, compression="xz")
entity_stats_df.to_csv("results/entity_stats.tsv.xz", sep="\t", index=False, compression="xz")
# # Sentence Counts and Statistics
# Below is the block of code that contains information about the full distribution of sentences tied to each candidate pair. Multiple sentences can contain more than one co-occurring pair, which results in some sentences being counted more than once. For example
# ```
# To assess the importance of the consensus amino acid sequence in [elf-4a G] for [atp G] binding, we mutated the consensus amino-proximal [glycine C] and [lysine C] to isoleucine and asparagine, respectively.
# ```
#
# In this sentence there are multiple mentions of genes and compounds, where some compounds are artificial proteins.
# ## Load and Merge DataFrames
# In[5]:
entity_level_df = pd.read_csv("../datafile/results/compound_binds_gene.tsv.xz")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 10:30:55 2018
@author: niels-peter
"""
import numpy as np
from numpy import math
import pandas as pd
import pickle
from sklearn.externals import joblib
import re
from italy_transformation import *
clf_EW_italy = joblib.load('/home/niels-peter/Dokumenter/EW_DK_ITALY.pkl')
#clf_EW_italy = joblib.load('EW_DK_ITALY_dummy.pkl')
xl = pd.ExcelFile('/home/niels-peter/Dokumenter/ITALY_100_FINANCIAL_STATEMENT.xlsx')
#!/usr/bin/env python
# coding: utf-8
# # <font color='yellow'>How can we predict not just the hourly PM2.5 concentration at the site of one EPA sensor, but predict the hourly PM2.5 concentration anywhere?</font>
#
# Here, you build a new model for any given hour on any given day. This will leverage readings across all ~120 EPA sensors, as well as weather data, traffic data, purpleair data, and maybe beacon data to create a model that predicts the PM2.5 value at that location.
# In[1]:
import json
import csv
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import geopandas as gpd
import shapely
from shapely.geometry import Point, MultiPoint, Polygon, MultiPolygon
from shapely.affinity import scale
import matplotlib.pyplot as plt
import glob
import os
import datetime
from datetime import timezone
import zipfile
import pickle
pd.set_option('display.max_columns', 500)
# ## <font color='yellow'>Loading data</font>
#
# We'll load EPA data, weather data, truck traffic data, Beacon data, and purpleair data
# In[2]:
df_epa = pd.read_csv("EPA_Data_MultiPointModel.csv")
df_epa.head(1)
# In[3]:
df_beac = pd.read_csv("Beacon_Data_MultiPointModel.csv")
df_beac.head(1)
# In[5]:
df_noaa = pd.read_csv("NOAA_Data_MultiPointModel.csv")
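# In[ ]:
# --- Hedged sketch (not from the original notebook) ---
# The introduction above describes building one model per hour that predicts
# PM2.5 at arbitrary locations from all ~120 EPA sensors plus weather, traffic,
# PurpleAir and Beacon features. A minimal illustration of that idea is shown
# here; it assumes scikit-learn is available and uses hypothetical column names
# ('lat', 'lon', 'pm25', plus whatever features get merged in), so it must be
# adapted to the real merged dataframe.
from sklearn.ensemble import RandomForestRegressor

def fit_hourly_model(df_hour, feature_cols, target_col='pm25'):
    """Fit one model for a single hour, with one row per EPA sensor reading."""
    X = df_hour[feature_cols].to_numpy()
    y = df_hour[target_col].to_numpy()
    model = RandomForestRegressor(n_estimators=200, random_state=0)
    model.fit(X, y)
    return model  # model.predict() can then be evaluated at any (lat, lon) feature row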
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Telecom Churn Case Study
# With 21 predictor variables we need to predict whether a particular customer will switch to another telecom provider or not. In telecom terminology, this is referred to as churning and not churning, respectively.
# ### Step 1: Importing and Merging Data
# Suppressing Warnings
import warnings
warnings.filterwarnings('ignore')
# Importing Pandas and NumPy
import pandas as pd, numpy as np
# Importing all datasets
churn_data = pd.read_csv("churn_data.csv")
churn_data.head()
customer_data = pd.read_csv("customer_data.csv")
customer_data.head()
internet_data = pd.read_csv("internet_data.csv")
internet_data.head()
# #### Combining all data files into one consolidated dataframe
# Merging on 'customerID'
df_1 = pd.merge(churn_data, customer_data, how='inner', on='customerID')
# Final dataframe with all predictor variables
telecom = pd.merge(df_1, internet_data, how='inner', on='customerID')
# ### Step 2: Inspecting the Dataframe
telecom.OnlineBackup.value_counts()
# Let's see the head of our master dataset
telecom.head()
# Let's check the dimensions of the dataframe
telecom.shape
# let's look at the statistical aspects of the dataframe
telecom.describe()
# Let's see the type of each column
telecom.info()
# ### Step 3: Data Preparation
# #### Converting some binary variables (Yes/No) to 0/1
# +
# List of variables to map
varlist = ['PhoneService', 'PaperlessBilling', 'Churn', 'Partner', 'Dependents']
# Defining the map function
def binary_map(x):
return x.map({'Yes': 1, "No": 0})
# Applying the function to the housing list
telecom[varlist] = telecom[varlist].apply(binary_map)
# -
telecom.head()
# #### For categorical variables with multiple levels, create dummy features (one-hot encoded)
# +
# Creating a dummy variable for some of the categorical variables and dropping the first one.
dummy1 = pd.get_dummies(telecom[['Contract', 'PaymentMethod', 'gender', 'InternetService']], drop_first=True)
# Adding the results to the master dataframe
telecom = pd.concat([telecom, dummy1], axis=1)
# -
telecom.head()
# +
# Creating dummy variables for the remaining categorical variables and dropping the level with big names.
# Creating dummy variables for the variable 'MultipleLines'
ml = pd.get_dummies(telecom['MultipleLines'], prefix='MultipleLines')
# Dropping MultipleLines_No phone service column
ml1 = ml.drop(['MultipleLines_No phone service'], 1)
#Adding the results to the master dataframe
telecom = pd.concat([telecom,ml1], axis=1)
# Creating dummy variables for the variable 'OnlineSecurity'.
os = pd.get_dummies(telecom['OnlineSecurity'], prefix='OnlineSecurity')
os1 = os.drop(['OnlineSecurity_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,os1], axis=1)
# Creating dummy variables for the variable 'OnlineBackup'.
ob = pd.get_dummies(telecom['OnlineBackup'], prefix='OnlineBackup')
ob1 = ob.drop(['OnlineBackup_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,ob1], axis=1)
# Creating dummy variables for the variable 'DeviceProtection'.
dp = pd.get_dummies(telecom['DeviceProtection'], prefix='DeviceProtection')
dp1 = dp.drop(['DeviceProtection_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,dp1], axis=1)
# Creating dummy variables for the variable 'TechSupport'.
ts = pd.get_dummies(telecom['TechSupport'], prefix='TechSupport')
ts1 = ts.drop(['TechSupport_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,ts1], axis=1)
# Creating dummy variables for the variable 'StreamingTV'.
st =pd.get_dummies(telecom['StreamingTV'], prefix='StreamingTV')
st1 = st.drop(['StreamingTV_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,st1], axis=1)
# Creating dummy variables for the variable 'StreamingMovies'.
sm = pd.get_dummies(telecom['StreamingMovies'], prefix='StreamingMovies')
sm1 = sm.drop(['StreamingMovies_No internet service'], 1)
# Adding the results to the master dataframe
telecom = pd.concat([telecom,sm1], axis=1)
# -
telecom.head()
# #### Dropping the repeated variables
# We have created dummies for the below variables, so we can drop them
telecom = telecom.drop(['Contract','PaymentMethod','gender','MultipleLines','InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection',
'TechSupport', 'StreamingTV', 'StreamingMovies'], 1)
telecom = telecom.loc[~telecom.index.isin([488, 753, 936, 1082, 1340, 3331, 3826, 4380, 5218, 6670, 6754])]
# +
# telecom['TotalCharges'].sample(40)
# telecom['TotalCharges'].str.replace('.', '', 1).str.contains('\D',regex=True).sum()
# telecom[telecom['TotalCharges'].str.replace('.', '', 1).str.contains('\D',regex=True)].TotalCharges.index
# -
# The variable was imported as a string, so we need to convert it to float
telecom['TotalCharges'] = telecom['TotalCharges'].str.strip().astype('float64')
telecom.info()
# Now you can see that you have all variables as numeric.
# #### Checking for Outliers
# Checking for outliers in the continuous variables
num_telecom = telecom[['tenure','MonthlyCharges','SeniorCitizen','TotalCharges']]
# Checking outliers at 25%, 50%, 75%, 90%, 95% and 99%
num_telecom.describe(percentiles=[.25, .5, .75, .90, .95, .99])
# From the distribution shown above, you can see that there are no outliers in your data. The numbers are gradually increasing.
# #### Checking for Missing Values and Imputing Them
# Adding up the missing values (column-wise)
telecom.isnull().sum()
# This means that 11/7043 = 0.001561834, i.e. about 0.16%, so it is best to remove these observations from the analysis
# Checking the percentage of missing values
round(100*(telecom.isnull().sum()/len(telecom.index)), 2)
# Removing NaN TotalCharges rows
telecom = telecom[~np.isnan(telecom['TotalCharges'])]
# Checking percentage of missing values after removing the missing values
round(100*(telecom.isnull().sum()/len(telecom.index)), 2)
# Now we don't have any missing values
# ### Step 4: Test-Train Split
from sklearn.model_selection import train_test_split
# +
# Putting feature variable to X
X = telecom.drop(['Churn','customerID'], axis=1)
X.head()
# +
# Putting response variable to y
y = telecom['Churn']
y.head()
# -
# Splitting the data into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=100)
# ### Step 5: Feature Scaling
from sklearn.preprocessing import StandardScaler
# +
scaler = StandardScaler()
X_train[['tenure','MonthlyCharges','TotalCharges']] = scaler.fit_transform(X_train[['tenure','MonthlyCharges','TotalCharges']])
X_train.head()
# -
### Checking the Churn Rate
churn = (sum(telecom['Churn'])/len(telecom['Churn'].index))*100
churn
# We have almost 27% churn rate
# ### Step 6: Looking at Correlations
# Importing matplotlib and seaborn
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Let's see the correlation matrix
plt.figure(figsize = (20,10)) # Size of the figure
sns.heatmap(telecom.corr(),annot = True)
plt.show()
# #### Dropping highly correlated dummy variables
X_test = X_test.drop(['MultipleLines_No','OnlineSecurity_No','OnlineBackup_No','DeviceProtection_No','TechSupport_No',
'StreamingTV_No','StreamingMovies_No'], 1)
X_train = X_train.drop(['MultipleLines_No','OnlineSecurity_No','OnlineBackup_No','DeviceProtection_No','TechSupport_No',
'StreamingTV_No','StreamingMovies_No'], 1)
# #### Checking the Correlation Matrix
# After dropping highly correlated variables now let's check the correlation matrix again.
plt.figure(figsize = (20,10))
sns.heatmap(X_train.corr(),annot = True)
plt.show()
# ### Step 7: Model Building
# Let's start by splitting our data into a training set and a test set.
# #### Running Your First Training Model
import statsmodels.api as sm
# Logistic regression model
logm1 = sm.GLM(y_train,(sm.add_constant(X_train)), family = sm.families.Binomial())
logm1.fit().summary()
# ### Step 8: Feature Selection Using RFE
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
from sklearn.feature_selection import RFE
rfe = RFE(logreg, 15) # running RFE with 15 variables as output
rfe = rfe.fit(X_train, y_train)
rfe.support_
list(zip(X_train.columns, rfe.support_, rfe.ranking_))
col = X_train.columns[rfe.support_]
X_train.columns[~rfe.support_]
# ##### Assessing the model with StatsModels
X_train_sm = sm.add_constant(X_train[col])
logm2 = sm.GLM(y_train,X_train_sm, family = sm.families.Binomial())
res = logm2.fit()
res.summary()
# Getting the predicted values on the train set
y_train_pred = res.predict(X_train_sm)
y_train_pred[:10]
y_train_pred = y_train_pred.values.reshape(-1)
y_train_pred[:10]
# ##### Creating a dataframe with the actual churn flag and the predicted probabilities
y_train_pred_final = pd.DataFrame({'Churn':y_train.values, 'Churn_Prob':y_train_pred})
y_train_pred_final['CustID'] = y_train.index
y_train_pred_final.head()
# ##### Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
# +
y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
# Let's see the head
y_train_pred_final.head()
# -
from sklearn import metrics
# Confusion matrix
confusion = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted )
print(confusion)
# +
# Predicted not_churn churn
# Actual
# not_churn 3270 365
# churn 579 708
# -
# Let's check the overall accuracy.
print(metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
# #### Checking VIFs
# Check for the VIF values of the feature variables.
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif = pd.DataFrame()
vif['Features'] = X_train[col].columns
vif['VIF'] = [variance_inflation_factor(X_train[col].values, i) for i in range(X_train[col].shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
# There are a few variables with high VIF. It's best to drop these variables as they aren't helping much with prediction and unnecessarily making the model complex. The variable 'PhoneService' has the highest VIF. So let's start by dropping that.
col = col.drop('PhoneService', 1)
col
# Let's re-run the model using the selected variables
X_train_sm = sm.add_constant(X_train[col])
logm3 = sm.GLM(y_train,X_train_sm, family = sm.families.Binomial())
res = logm3.fit()
res.summary()
y_train_pred = res.predict(X_train_sm).values.reshape(-1)
y_train_pred[:10]
y_train_pred_final['Churn_Prob'] = y_train_pred
# Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
y_train_pred_final.head()
# Let's check the overall accuracy.
print(metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
# So overall the accuracy hasn't dropped much.
# ##### Let's check the VIFs again
vif = pd.DataFrame()
vif['Features'] = X_train[col].columns
vif['VIF'] = [variance_inflation_factor(X_train[col].values, i) for i in range(X_train[col].shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
# Let's drop TotalCharges since it has a high VIF
col = col.drop('TotalCharges')
col
# Let's re-run the model using the selected variables
X_train_sm = sm.add_constant(X_train[col])
logm4 = sm.GLM(y_train,X_train_sm, family = sm.families.Binomial())
res = logm4.fit()
res.summary()
y_train_pred = res.predict(X_train_sm).values.reshape(-1)
y_train_pred[:10]
y_train_pred_final['Churn_Prob'] = y_train_pred
# Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
y_train_pred_final.head()
# Let's check the overall accuracy.
print(metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
# The accuracy is still practically the same.
# ##### Let's now check the VIFs again
vif = pd.DataFrame()
vif['Features'] = X_train[col].columns
vif['VIF'] = [variance_inflation_factor(X_train[col].values, i) for i in range(X_train[col].shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
# All variables have a good value of VIF. So we need not drop any more variables and we can proceed with making predictions using this model only
# Let's take a look at the confusion matrix again
confusion = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted )
confusion
# Actual/Predicted not_churn churn
# not_churn 3269 366
# churn 595 692
# Let's check the overall accuracy.
metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted)
# ## Metrics beyond simply accuracy
TP = confusion[1,1] # true positive
TN = confusion[0,0] # true negatives
FP = confusion[0,1] # false positives
FN = confusion[1,0] # false negatives
# Let's see the sensitivity of our logistic regression model
TP / float(TP+FN)
# Let us calculate specificity
TN / float(TN+FP)
# Calculate false positive rate - predicting churn when the customer has not churned
print(FP/ float(TN+FP))
# positive predictive value
print (TP / float(TP+FP))
# Negative predictive value
print (TN / float(TN+ FN))
# ### Step 9: Plotting the ROC Curve
# An ROC curve demonstrates several things:
#
# - It shows the tradeoff between sensitivity and specificity (any increase in sensitivity will be accompanied by a decrease in specificity).
# - The closer the curve follows the left-hand border and then the top border of the ROC space, the more accurate the test.
# - The closer the curve comes to the 45-degree diagonal of the ROC space, the less accurate the test.
def draw_roc( actual, probs ):
fpr, tpr, thresholds = metrics.roc_curve( actual, probs,
drop_intermediate = False )
auc_score = metrics.roc_auc_score( actual, probs )
plt.figure(figsize=(5, 5))
plt.plot( fpr, tpr, label='ROC curve (area = %0.2f)' % auc_score )
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate or [1 - True Negative Rate]')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
return None
fpr, tpr, thresholds = metrics.roc_curve( y_train_pred_final.Churn, y_train_pred_final.Churn_Prob, drop_intermediate = False )
list(zip(fpr,tpr,thresholds))
draw_roc(y_train_pred_final.Churn, y_train_pred_final.Churn_Prob)
# ### Step 10: Finding Optimal Cutoff Point
# Optimal cutoff probability is that prob where we get balanced sensitivity and specificity
# Let's create columns with different probability cutoffs
numbers = [float(x)/10 for x in range(10)]
for i in numbers:
y_train_pred_final[i]= y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > i else 0)
y_train_pred_final.head()
# +
# Now let's calculate accuracy sensitivity and specificity for various probability cutoffs.
cutoff_df = pd.DataFrame(columns = ['prob','accuracy','sensi','speci'])
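# A hedged sketch (not part of the original notebook) of how `cutoff_df` is
# typically filled in: compute accuracy, sensitivity and specificity for every
# candidate cutoff and pick the probability where the three curves balance.
for i in numbers:
    cm = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final[i])
    total = cm.sum()
    accuracy = (cm[0, 0] + cm[1, 1]) / total
    speci = cm[0, 0] / (cm[0, 0] + cm[0, 1])
    sensi = cm[1, 1] / (cm[1, 0] + cm[1, 1])
    cutoff_df.loc[i] = [i, accuracy, sensi, speci]
print(cutoff_df)
# Plotting the three curves against the cutoff shows where sensitivity and
# specificity intersect; that crossing point is then used as the operating
# threshold instead of the default 0.5.
cutoff_df.plot.line(x='prob', y=['accuracy', 'sensi', 'speci'])
plt.show()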
"""Prediction result visualization"""
import pandas as pd
import matplotlib.pyplot as plt
def visualize(result, y_test, num_test, rmse):
"""
:param result: RUL prediction results
:param y_test: true RUL of testing set
:param num_test: number of samples
:param rmse: RMSE of prediction results
"""
result = y_test.join(pd.DataFrame(result))
import imagehash
import pandas as pd
import numpy as np
import os
import sys
import math
hashesPath = sys.argv[1]
outPath = sys.argv[2]
df1 = pd.read_csv(hashesPath)
from __future__ import annotations
import os
import numpy as np
import pandas as pd
import scipy.optimize as so
import scipy.special as sp
from pymwm.utils.cutoff_utils import f_fp_fpp_cython
class Cutoff:
"""A callable class that calculates the values of u at cutoff frequencies for coaxial waveguides made of PEC
Attributes:
num_n (int): The number of orders of modes.
num_m (int): The number of modes in each order and polarization.
r_ratios (list[float]): A list of ratio between inner and outer radii.
samples (DataFrame): Cutoff values of u at r_ratios.
"""
dirname = os.path.join(os.path.expanduser("~"), ".pymwm")
filename = os.path.join(dirname, "cutoff.h5")
def __init__(self, num_n: int, num_m: int) -> None:
"""Init Cutoff class."""
self.num_n, self.num_m = num_n, num_m
self.r_ratios = 0.001 * np.arange(1000)
if os.path.exists(self.filename):
samples = pd.read_hdf(self.filename)
if self.num_n > samples["n"].max() + 1 or self.num_m >= samples["m"].max():
self.samples = self.cutoffs()
self.samples.to_hdf(self.filename, "cutoff")
else:
self.samples = samples[
(samples["n"] < num_n)
& (
(samples["pol"] == "E") & (samples["m"] <= num_m)
| (samples["pol"] == "M") & (samples["m"] <= num_m + 1)
)
].reset_index(drop=True)
if not os.path.exists(self.filename):
if not os.path.exists(self.dirname):
print("Folder Not Found.")
os.mkdir(self.dirname)
print("File Not Found.")
self.samples = self.cutoffs()
self.samples.to_hdf(self.filename, "cutoff")
def __call__(self, alpha: tuple, r_ratio: float) -> float:
"""Return the cutoff value of u
Args:
alpha (tuple[pol (str), n (int), m (int)]):
pol: 'E' or 'M' indicating the polarization.
                n: An integer indicating the order of the mode.
                m: An integer indicating the ordinal of the mode in the same
order.
r_ratio (float): The ratio between inner and outer radii.
Returns:
float: The value of u at cutoff frequency.
"""
df = self.samples
df = df[(df["pol"] == alpha[0]) & (df["n"] == alpha[1]) & (df["m"] == alpha[2])]
x = df["rr"].to_numpy()
y = df["val"].to_numpy()
i: int = np.where(x <= r_ratio)[0][-1]
a: float = r_ratio - x[i]
b: float = x[i + 1] - r_ratio
val: float = (b * y[i] + a * y[i + 1]) / (a + b)
return val
def PEC(
self, u: float, r_ratio: float, n: int, pol: str
) -> tuple[float, float, float]:
x = u * r_ratio
def f_fp_fpp_fppp(func, z):
f = func(n, z)
fp = -func(n + 1, z) + n / z * f
fpp = -fp / z - (1 - n ** 2 / z ** 2) * f
fppp = -fpp / z - fp + (n ** 2 + 1) * fp / z ** 2 - 2 * n ** 2 / z ** 3 * f
return f, fp, fpp, fppp
jx, jpx, jppx, jpppx = f_fp_fpp_fppp(sp.jv, x)
yx, ypx, yppx, ypppx = f_fp_fpp_fppp(sp.yv, x)
ju, jpu, jppu, jpppu = f_fp_fpp_fppp(sp.jv, u)
yu, ypu, yppu, ypppu = f_fp_fpp_fppp(sp.yv, u)
if pol == "E":
f = jpx * ypu - ypx * jpu
fp = r_ratio * jppx * ypu + jpx * yppu - r_ratio * yppx * jpu - ypx * jppu
fpp = (
r_ratio ** 2 * jpppx * ypu
+ 2 * r_ratio * jppx * yppu
+ jpx * ypppu
- r_ratio ** 2 * ypppx * jpu
- 2 * r_ratio * yppx * jppu
- ypx * jpppu
)
else:
f = jx * yu - yx * ju
fp = r_ratio * jpx * yu + jx * ypu - r_ratio * ypx * ju - yx * jpu
fpp = (
r_ratio ** 2 * jppx * yu
+ 2 * r_ratio * jpx * ypu
+ jx * yppu
- r_ratio ** 2 * yppx * ju
- 2 * r_ratio * ypx * jpu
- yx * jppu
)
return f, fp, fpp
def cutoffs_numpy(self) -> pd.DataFrame:
import ray
if not ray.is_initialized():
ray.init()
rrs_id = ray.put(self.r_ratios)
pec_id = ray.put(self.PEC)
@ray.remote
def func(alpha, kini, rrs, pec):
pol, n, m = alpha
drr = 0.1 * (rrs[1] - rrs[0])
x0 = x1 = kini
z = []
for rr in rrs:
z.append(x1)
for i in range(1, 11):
sol = so.root_scalar(
pec,
x0=2 * x1 - x0,
fprime=True,
fprime2=True,
method="halley",
args=(rr + i * drr, n, pol),
)
x0 = x1
x1 = sol.root
return z
num_m = self.num_m
args = []
kinis = sp.jn_zeros(0, num_m)
for m in range(2, num_m + 2):
args.append((("M", 0, m), kinis[m - 2]))
kinis = sp.jnp_zeros(0, num_m)
for m in range(1, num_m + 1):
args.append((("E", 0, m), kinis[m - 1]))
for n in range(1, self.num_n):
for (pol, m_end) in [("M", num_m + 2), ("E", num_m + 1)]:
if pol == "E":
kinis = sp.jnp_zeros(n, m_end - 1)
else:
kinis = sp.jn_zeros(n, m_end - 1)
for m in range(1, m_end):
kini = kinis[m - 1]
args.append(((pol, n, m), kini))
result_ids = [func.remote(alpha, kini, rrs_id, pec_id) for alpha, kini in args]
results = ray.get(result_ids)
if ray.is_initialized():
ray.shutdown()
df = pd.DataFrame()
num_rr = len(self.r_ratios)
df["pol"] = np.full(num_rr, "M")
df["n"] = 0
df["m"] = 1
df["irr"] = np.arange(num_rr)
df["rr"] = self.r_ratios
df["val"] = 0.0
for i in range(len(args)):
(pol, n, m), _ = args[i]
z = results[i]
df1 = pd.DataFrame()
df1["pol"] = np.full(num_rr, pol)
df1["n"] = n
df1["m"] = m
df1["irr"] = np.arange(num_rr)
df1["rr"] = self.r_ratios
df1["val"] = z
df = pd.concat([df, df1], ignore_index=True)
return df
def cutoffs(self) -> pd.DataFrame:
import ray
if not ray.is_initialized():
ray.init()
rrs_id = ray.put(self.r_ratios)
@ray.remote
def func(alpha, kini, rrs):
pol, n, m = alpha
drr = 0.1 * (rrs[1] - rrs[0])
x0 = x1 = kini
z = []
for rr in rrs:
z.append(x1)
for i in range(1, 11):
sol = so.root_scalar(
f_fp_fpp_cython,
x0=2 * x1 - x0,
fprime=True,
fprime2=True,
method="halley",
args=(rr + i * drr, n, pol),
)
x0 = x1
x1 = sol.root
return z
num_m = self.num_m
args = []
kinis = sp.jn_zeros(0, num_m)
for m in range(2, num_m + 2):
args.append((("M", 0, m), kinis[m - 2]))
kinis = sp.jnp_zeros(0, num_m)
for m in range(1, num_m + 1):
args.append((("E", 0, m), kinis[m - 1]))
for n in range(1, self.num_n):
for (pol, m_end) in [("M", num_m + 2), ("E", num_m + 1)]:
if pol == "E":
kinis = sp.jnp_zeros(n, m_end - 1)
else:
kinis = sp.jn_zeros(n, m_end - 1)
for m in range(1, m_end):
kini = kinis[m - 1]
args.append(((pol, n, m), kini))
result_ids = [func.remote(alpha, kini, rrs_id) for alpha, kini in args]
results = ray.get(result_ids)
if ray.is_initialized():
ray.shutdown()
df = pd.DataFrame()
num_rr = len(self.r_ratios)
df["pol"] = np.full(num_rr, "M")
df["n"] = 0
df["m"] = 1
df["irr"] = np.arange(num_rr)
df["rr"] = self.r_ratios
df["val"] = 0.0
for i in range(len(args)):
(pol, n, m), _ = args[i]
z = results[i]
df1 = | pd.DataFrame() | pandas.DataFrame |
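# A hedged usage sketch for the Cutoff class above (first instantiation may build and cache
# ~/.pymwm/cutoff.h5 via ray, which can take a while; the mode tuple values are illustrative):
#   co = Cutoff(num_n=2, num_m=2)
#   u_c = co(("E", 1, 1), 0.5)   # cutoff value of u for the (pol="E", n=1, m=1) mode at r_in/r_out = 0.5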
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score
import random
import numpy as np
import hydra
from omegaconf import DictConfig
from pytorch_lightning import (
LightningDataModule,
Trainer,
seed_everything,
)
import pandas as pd
from pytorch_tabular import TabularModel
from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig
from pytorch_tabular.models import CategoryEmbeddingModelConfig, NodeConfig
def make_mixed_classification(n_samples, n_features, n_categories):
X,y = make_classification(n_samples=n_samples, n_features=n_features, random_state=42, n_informative=5)
cat_cols = random.choices(list(range(X.shape[-1])),k=n_categories)
num_cols = [i for i in range(X.shape[-1]) if i not in cat_cols]
for col in cat_cols:
X[:,col] = pd.qcut(X[:,col], q=4).codes.astype(int)
col_names = []
num_col_names=[]
cat_col_names=[]
for i in range(X.shape[-1]):
if i in cat_cols:
col_names.append(f"cat_col_{i}")
cat_col_names.append(f"cat_col_{i}")
if i in num_cols:
col_names.append(f"num_col_{i}")
num_col_names.append(f"num_col_{i}")
X = pd.DataFrame(X, columns=col_names)
y = | pd.Series(y, name="target") | pandas.Series |
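    # A hedged guess at how this helper finishes (the original may differ):
    return X, y, cat_col_names, num_col_names


# Example usage of the sketch above; the exact DataConfig argument names for categorical and
# continuous columns should be checked against the pytorch_tabular documentation:
def _example_split():
    X, y, cat_col_names, num_col_names = make_mixed_classification(
        n_samples=10000, n_features=20, n_categories=4
    )
    data = X.copy()
    data["target"] = y
    train, test = train_test_split(data, random_state=42)
    train, valid = train_test_split(train, random_state=42)
    return train, valid, test, cat_col_names, num_col_names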
import numpy as np
import pandas as pd
import scanpy as sc
from scipy import sparse
from sklearn.linear_model import LinearRegression
from ..utils import check_adata, check_batch
def pcr_comparison(
adata_pre,
adata_post,
covariate,
embed=None,
n_comps=50,
scale=True,
verbose=False
):
"""
Compare the explained variance before and after integration
Return either the difference of variance contribution before and after integration
or a score between 0 and 1 (`scaled=True`) with 0 if the variance contribution hasn't
changed. The larger the score, the more different the variance contributions are before
and after integration.
:param adata_pre: anndata object before integration
:param adata_post: anndata object after integration
:param covariate: Key for adata.obs column to regress against
:param embed: Embedding to use for principal components.
If None, use the full expression matrix (`adata.X`), otherwise use the embedding
provided in `adata_post.obsm[embed]`.
:param n_comps: Number of principal components to compute
:param scale: If True, scale score between 0 and 1 (default)
:param verbose:
:return:
Difference of R2Var value of PCR (scaled between 0 and 1 by default)
"""
if embed == 'X_pca':
embed = None
pcr_before = pcr(
adata_pre,
covariate=covariate,
recompute_pca=True,
n_comps=n_comps,
verbose=verbose
)
pcr_after = pcr(
adata_post,
covariate=covariate,
embed=embed,
recompute_pca=True,
n_comps=n_comps,
verbose=verbose
)
if scale:
score = (pcr_before - pcr_after) / pcr_before
if score < 0:
print(
"Variance contribution increased after integration!\n"
"Setting PCR comparison score to 0."
)
score = 0
return score
else:
return pcr_after - pcr_before
def pcr(
adata,
covariate,
embed=None,
n_comps=50,
recompute_pca=True,
verbose=False
):
"""
Principal component regression for anndata object
Checks whether to
+ compute PCA on embedding or expression data (set `embed` to name of embedding matrix e.g. `embed='X_emb'`)
+ use existing PCA (only if PCA entry exists)
+ recompute PCA on expression matrix (default)
:param adata: Anndata object
:param covariate: Key for adata.obs column to regress against
:param embed: Embedding to use for principal components.
If None, use the full expression matrix (`adata.X`), otherwise use the embedding
provided in `adata_post.obsm[embed]`.
:param n_comps: Number of PCs, if PCA is recomputed
:return:
R2Var of regression
"""
check_adata(adata)
check_batch(covariate, adata.obs)
if verbose:
print(f"covariate: {covariate}")
covariate_values = adata.obs[covariate]
# use embedding for PCA
if (embed is not None) and (embed in adata.obsm):
if verbose:
print(f"Compute PCR on embedding n_comps: {n_comps}")
return pc_regression(adata.obsm[embed], covariate_values, n_comps=n_comps)
# use existing PCA computation
elif (recompute_pca == False) and ('X_pca' in adata.obsm) and ('pca' in adata.uns):
if verbose:
print("using existing PCA")
return pc_regression(adata.obsm['X_pca'], covariate_values, pca_var=adata.uns['pca']['variance'])
# recompute PCA
else:
if verbose:
print(f"compute PCA n_comps: {n_comps}")
return pc_regression(adata.X, covariate_values, n_comps=n_comps)
def pc_regression(
data,
covariate,
pca_var=None,
n_comps=50,
svd_solver='arpack',
verbose=False
):
"""
:params data: Expression or PC matrix. Assumed to be PC, if pca_sd is given.
:param covariate: series or list of batch assignments
:param n_comps: number of PCA components for computing PCA, only when pca_sd is not given.
If no pca_sd is not defined and n_comps=None, compute PCA and don't reduce data
:param pca_var: Iterable of variances for `n_comps` components.
If `pca_sd` is not `None`, it is assumed that the matrix contains PC,
otherwise PCA is computed on `data`.
:param svd_solver:
:param verbose:
:return:
R2Var of regression
"""
if isinstance(data, (np.ndarray, sparse.csr_matrix, sparse.csc_matrix)):
matrix = data
else:
raise TypeError(f'invalid type: {data.__class__} is not a numpy array or sparse matrix')
# perform PCA if no variance contributions are given
if pca_var is None:
if n_comps is None or n_comps > min(matrix.shape):
n_comps = min(matrix.shape)
if n_comps == min(matrix.shape):
svd_solver = 'full'
if verbose:
print("compute PCA")
pca = sc.tl.pca(matrix, n_comps=n_comps, use_highly_variable=False,
return_info=True, svd_solver=svd_solver, copy=True)
X_pca = pca[0].copy()
pca_var = pca[3].copy()
del pca
else:
X_pca = matrix
n_comps = matrix.shape[1]
## PC Regression
if verbose:
print("fit regression on PCs")
# handle categorical values
if pd.api.types.is_numeric_dtype(covariate):
covariate = np.array(covariate).reshape(-1, 1)
else:
if verbose:
print("one-hot encode categorical values")
covariate = | pd.get_dummies(covariate) | pandas.get_dummies |
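    # A hedged sketch of how pc_regression typically concludes: regress each PC on the covariate
    # and weight the resulting R2 values by each PC's share of the variance (details may differ
    # from the original implementation):
    pca_var = np.asarray(pca_var)
    r2 = []
    for i in range(n_comps):
        pc = X_pca[:, [i]]
        if sparse.issparse(pc):
            pc = np.asarray(pc.todense())
        lm = LinearRegression().fit(covariate, pc)
        r2.append(np.maximum(0, lm.score(covariate, pc)))
    Var = pca_var / pca_var.sum() * 100
    R2Var = float(np.sum(np.array(r2) * Var) / 100)
    return R2Var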
import glob
import os
import sys
import copy
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyabf
from ipfx import feature_extractor
from ipfx import subthresh_features as subt
print("feature extractor loaded")
from .abf_ipfx_dataframes import _build_full_df, _build_sweepwise_dataframe, save_data_frames
from .loadABF import loadABF
from .patch_utils import plotabf, load_protocols, find_non_zero_range
from .QC import run_qc
default_dict = {'start': 0, 'end': 0, 'filter': 0}
def folder_feature_extract(files, param_dict, plot_sweeps=-1, protocol_name='IC1', para=1):
debugplot = 0
running_lab = ['Trough', 'Peak', 'Max Rise (upstroke)', 'Max decline (downstroke)', 'Width']
dfs = | pd.DataFrame() | pandas.DataFrame |
"""Contains methods and classes to collect data from
tushare API
"""
import pandas as pd
import tushare as ts
from tqdm import tqdm
class TushareDownloader :
"""Provides methods for retrieving daily stock data from
tushare API
Attributes
----------
start_date : str
start date of the data (modified from config.py)
end_date : str
end date of the data (modified from config.py)
ticker_list : list
a list of stock tickers (modified from config.py)
Methods
-------
fetch_data()
Fetches data from tushare API
date:date
Open: opening price
High: the highest price
Close: closing price
Low: lowest price
Volume: volume
Price_change: price change
        P_change: percentage price change
        ma5: 5-day moving average price
        Ma10: 10-day moving average price
        Ma20: 20-day moving average price
        V_ma5: 5-day moving average volume
        V_ma10: 10-day moving average volume
        V_ma20: 20-day moving average volume
"""
def __init__(self, start_date: str, end_date: str, ticker_list: list):
self.start_date = start_date
self.end_date = end_date
self.ticker_list = ticker_list
def fetch_data(self) -> pd.DataFrame:
"""Fetches data from Yahoo API
Parameters
----------
Returns
-------
`pd.DataFrame`
7 columns: A date, open, high, low, close, volume and tick symbol
for the specified stock ticker
"""
# Download and save the data in a pandas DataFrame:
data_df = | pd.DataFrame() | pandas.DataFrame |
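        # A hedged sketch of the download loop, assuming the legacy
        # ts.get_hist_data(code, start=..., end=...) interface (columns as listed in the class docstring):
        for tic in tqdm(self.ticker_list, total=len(self.ticker_list)):
            temp_df = ts.get_hist_data(tic, start=self.start_date, end=self.end_date)
            temp_df["tic"] = tic
            data_df = pd.concat([data_df, temp_df])
        data_df = data_df.reset_index()  # move the date index back into a column
        return data_df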
import pandas as pd
import numpy as np
import os
from os import listdir
from os.path import isfile, join
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
def load_csvs(paths):
"""Creates a dataframe dictionary from the csv files in /data : dict_df
Arguments
---------
param_file : paths
Path to the data files (/data)
"""
filepaths = [f for f in listdir(paths) if isfile(join(paths, f))]
onlyfiles = [os.path.join(paths, f) for f in filepaths]
dict_df = {}
for files in onlyfiles:
#validate that the files are csv. Else the read function will not work
_, filename = os.path.split(files)
name, ending = os.path.splitext(filename)
if ending == '.csv':
dict_df[name] = pd.read_csv(files, header=0)
else:
            print('You have mixed file types in your directory, please make sure all are of .csv type! {}'.format(files))
return dict_df
def make_outputfile(param_file):
"""Creates a string from the template OSeMOSYS file
Arguments
---------
param_file : str
Path to the parameter file
"""
allLinesFromXy = ""
with open(param_file, "r") as inputFile:
allLinesFromXy = inputFile.read()
outPutFile = allLinesFromXy
return outPutFile
def functions_to_run(dict_df, outPutFile):
"""Runs all the functions for the different parameters
Arguments
---------
dict_df, outPutFile
dict_df: is a dictionary which contains all the csv files as dataframes from load_csv. Key is the name of the csv file
outPutFile: is a string with the empty OSeMOSYS parameters file from make_outputfile
"""
if 'operational_life' in dict_df:
outPutFile = operational_life(outPutFile, dict_df['GIS_data'], dict_df['input_data'], dict_df['operational_life'])
else:
print('No operational_life file')
#################################################################################
if 'fixed_cost' in dict_df:
outPutFile = fixedcost(dict_df['GIS_data'], outPutFile, dict_df['input_data'], dict_df['fixed_cost'])
else:
print('No fixed_cost file')
#####################################################################################
if 'total_annual_technology_limit' in dict_df:
outPutFile = totaltechnologyannualactivityupperlimit(dict_df['GIS_data'], outPutFile, dict_df['input_data'], dict_df['total_annual_technology_limit'])
else:
print('No total_annual_technology_limit file')
if 'demand' in dict_df:
outPutFile = specifiedannualdemand(outPutFile, dict_df['demand'], dict_df['input_data'])
else:
print('No demand file')
####################################################################################
if 'capitalcost_RET' in dict_df:
outPutFile = capitalcost_dynamic(dict_df['GIS_data'], outPutFile, dict_df['capitalcost_RET'],
dict_df['capacityfactor_wind'], dict_df['capacityfactor_solar'],
dict_df['input_data'])
else:
print('No capitalcost_RET file')
###########################################################################
if 'capitalcost' in dict_df:
outPutFile = capitalcost(outPutFile, dict_df['capitalcost'], dict_df['input_data'])
else:
print('No capitalcost file')
# ################################################################################
if 'capacityfactor_solar' or 'capacityfactor_wind' in dict_df:
outPutFile = capacityfactor(outPutFile, dict_df['GIS_data'], dict_df['battery'], dict_df['input_data'], dict_df['capacityfactor_wind'], dict_df['capacityfactor_solar'])
else:
print('No capacityfactor_solar or capacityfactor_wind file')
# ###############################################################################
if 'capacitytoactivity' in dict_df:
outPutFile = capacitytoactivity(dict_df['capacitytoactivity'], outPutFile, dict_df['input_data'])
else:
print('No capacitytoactivity file')
#################################################################################
if 'demandprofile' in dict_df:
outPutFile = SpecifiedDemandProfile(outPutFile, dict_df['demandprofile'],
dict_df['input_data'])
else:
print('No demandprofile file')
###########################################################
###################### Mode of operation parameters######################
if 'emissions' in dict_df:
outPutFile = emissionactivity(dict_df['GIS_data'], outPutFile, dict_df['input_data'], dict_df['emissions'])
else:
print('No emissions file')
########################################################
if 'variable_cost' in dict_df:
outPutFile = variblecost(dict_df['GIS_data'], outPutFile, dict_df['input_data'], dict_df['variable_cost'])
else:
print('No variable_cost file')
#############################################################
if 'inputactivity' in dict_df:
outPutFile = inputact(outPutFile, dict_df['inputactivity'], dict_df['input_data'])
else:
print('No inputactivity file')
################################################################
if 'outputactivity' in dict_df:
outPutFile = outputactivity(outPutFile, dict_df['outputactivity'], dict_df['input_data'])
else:
print('No outputactivity file')
return(outPutFile)
def operational_life(outPutFile, GIS_data, input_data, operational_life):
"""
builds the OperationalLife (Region, Technology, OperationalLife)
-------------
Arguments
outPutFile, GIS_data, input_data, operational_life
outPutFile: is a string containing the OSeMOSYS parameters file
GIS_data: is the location specific data which is used to iterate the data
input_data: contains meta data such as region, start year, end year, months, timeslices
OperationalLife: is the operational life per technology
"""
dataToInsert = ""
print("Operational life", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
param = "param OperationalLife default 1 :=\n"
startIndex = outPutFile.index(param) + len(param)
for i, row in GIS_data.iterrows():
location = row['Location']
for m, line in operational_life.iterrows():
t = line['Technology']
l = line['Life']
dataToInsert += "%s\t%s_%i\t%i\n" % (input_data['region'][0],t, location, l)
outPutFile = outPutFile[:startIndex] + dataToInsert + outPutFile[startIndex:]
return(outPutFile)
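# A minimal, self-contained illustration of operational_life() (all values hypothetical):
if __name__ == "__main__":
    _gis = pd.DataFrame({'Location': [1001, 1002]})
    _inp = pd.DataFrame({'region': ['REGION1']})
    _ol = pd.DataFrame({'Technology': ['SOPV', 'WIND'], 'Life': [25, 20]})
    _template = "param OperationalLife default 1 :=\n;\n"
    # Each appended row is tab-separated "<region>\t<technology>_<location>\t<life>", e.g. "REGION1\tSOPV_1001\t25"
    print(operational_life(_template, _gis, _inp, _ol))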
def fixedcost(df, outPutFile, input_data, fixed_cost):
"""
Builds the Fixed cost (Region, Technology, Year, Fixed cost)
-------------
Arguments
df, outPutFile, input_data, fixed_cost
outPutFile: is a string containing the OSeMOSYS parameters file
df: is the location specific data which is used to iterate the data
input_data: contains meta data such as region, start year, end year, months, timeslices
fixed_cost: is the fixed cost per technology
"""
print("Fixed cost", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
dataToInsert = ""
param = "param FixedCost default 0 :=\n"
startIndex = outPutFile.index(param) + len(param)
for i, row in df.iterrows():
location = row['Location']
for m, line in fixed_cost.iterrows():
t = line['Technology']
fc = line['Fixed Cost']
year = int(input_data['startyear'][0])
while year <= int(input_data['endyear'][0]):
dataToInsert += "%s\t%s_%i\t%i\t%f\n" % (input_data['region'][0], t, location, year, fc)
year += 1
outPutFile = outPutFile[:startIndex] + dataToInsert + outPutFile[startIndex:]
return(outPutFile)
def emissionactivity(df, outPutFile, input_data, emissions):
"""
Builds the Emission activity (Region, Technology, Emissiontype, Technology, ModeofOperation, Year, emissionactivity)
-------------
Arguments
df, outPutFile, input_data, emissions
outPutFile: is a string containing the OSeMOSYS parameters file
df: is the location specific data which is used to iterate the data
input_data: contains meta data such as region, start year, end year, months, timeslices
emissions: is the emissionactivity per technology and mode of operation
"""
print("Emission activity", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
dataToInsert = ""
param = "param EmissionActivityRatio default 0 :=\n"
startIndex = outPutFile.index(param) + len(param)
for i, row in df.iterrows():
        location = row['Location']
for m, line in emissions.iterrows():
year = int(input_data['startyear'][0])
t = line['Technology']
k = line['Modeofoperation']
CO2 = line['CO2']
NOx = line['NOx']
while year <= int(input_data['endyear'][0]):
dataToInsert += "%s\t%s_%i\tCO2\t%i\t%i\t%f\n" % (input_data['region'][0], t, location, k, year, CO2)
dataToInsert += "%s\t%s_%i\tNOX\t%i\t%i\t%f\n" % (input_data['region'][0], t, location, k, year, NOx)
year += 1
outPutFile = outPutFile[:startIndex] + dataToInsert + outPutFile[startIndex:]
return (outPutFile)
def variblecost(df, outPutFile, input_data, variable_cost):
"""
Builds the Variable cost (Region, Technology, ModeofOperation, Year, Variablecost)
-------------
Arguments
df, outPutFile, input_data, variable_cost
outPutFile: is a string containing the OSeMOSYS parameters file
df: is the location specific data which is used to iterate the data
input_data: contains meta data such as region, start year, end year, months, timeslices
variable_cost: is the variable cost per technology and mode of operation
"""
print("Variable cost", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
dataToInsert = ""
param = "param VariableCost default 0 :=\n"
startIndex = outPutFile.index(param) + len(param)
for i, row in df.iterrows():
location = str(row['Location'])
year = int(input_data['startyear'][0])
for m, line in variable_cost.iterrows():
while year <= int(input_data['endyear'][0]):
t = line['Technology']
vc = line['Variable Cost']
modeofop = line['ModeofOperation']
dataToInsert += "%s\t%s_%s\t%i\t%i\t%f\n" % (input_data['region'][0], t, location, modeofop, year, vc)
year += 1
outPutFile = outPutFile[:startIndex] + dataToInsert + outPutFile[startIndex:]
return(outPutFile)
def totaltechnologyannualactivityupperlimit(df,outPutFile, input_data, totalannuallimit):
"""
Builds the TotalTechnologyAnnualActivityUpperLimit (Region, Technology, Year, TotalTechnologyUpperLimit)
-------------
Arguments
df,outPutFile, input_data, totalannuallimit
outPutFile: is a string containing the OSeMOSYS parameters file
df: is the location specific data which is used to iterate for the location specific the data
input_data: contains meta data such as region, start year, end year, months, timeslices
totalannuallimit: is the total annual limit per technology
"""
print("TotalTechnologyAnnualActivityUpperLimit", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
dataToInsert = ""
param = "param TotalTechnologyAnnualActivityUpperLimit default 99999999999 :=\n"
startIndex = outPutFile.index(param) + len(param)
for index, row in df.iterrows():
location = row['Location']
year = int(input_data['startyear'][0])
while year <= int(input_data['endyear'][0]):
for m, line in totalannuallimit.iterrows():
tech = line['Technology']
cf = line[location]
dataToInsert += "%s\t%s_%i\t%i\t%f\n" % (input_data['region'][0], tech, location, year, cf)
year = year + 1
outPutFile = outPutFile[:startIndex] + dataToInsert + outPutFile[startIndex:]
return(outPutFile)
def inputact(outPutFile, inputactivity, input_data):
"""
Builds the InputactivityRatio (Region, Technology, Fuel, Modeofoperation, Year, InputactivityRatio)
-------------
Arguments
outPutFile, inputactivity, input_data
outPutFile: is a string containing the OSeMOSYS parameters file
input_data: contains meta data such as region, start year, end year, months, timeslices
inputactivity: is the inputactivity per fuel and technology
"""
dataToInsert = ""
print("Input activity", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
param = "param InputActivityRatio default 0 :=\n"
startIndex = outPutFile.index(param) + len(param)
for j, row in inputactivity.iterrows():
technology = row['Technology']
fuel = row['Fuel']
inputactivityratio = row['Inputactivity']
modeofoperation = row['ModeofOperation']
year = int(input_data['startyear'][0])
while year<=int(input_data['endyear'][0]):
dataToInsert += "%s\t%s\t%s\t%i\t%i\t%f\n" % (input_data['region'][0], technology, fuel, modeofoperation, year, inputactivityratio)
year = year + 1
outPutFile = outPutFile[:startIndex] + dataToInsert + outPutFile[startIndex:]
return (outPutFile)
def SpecifiedDemandProfile(outPutFile, demandprofile, input_data):
"""
Builds the SpecifiedDemandProfile (Region, Fuel, Timeslice, Year, SpecifiedDemandProfile)
-------------
Arguments
outPutFile, demandprofile,input_data
outPutFile: is a string containing the OSeMOSYS parameters file
input_data: contains meta data such as region, start year, end year, months, timeslices
demandprofile: is the demandprofile per fuel and technology
"""
dataToInsert = ""
print("SpecifiedDemandProfile", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
param = "param SpecifiedDemandProfile default 0 :=\n"
startIndex = outPutFile.index(param) + len(param)
fuels = input_data['Demand fuel']
demand_fuels = [x for x in fuels if str(x) != 'nan']
d = demandprofile.columns[1:]
for i in demand_fuels:
for k, line in demandprofile.iterrows():
timeslice = line['Timeslice']
for j in d:
demandprofile.index = demandprofile['Timeslice']
demand_profile = demandprofile.loc[timeslice][j]
dataToInsert += "%s\t%s\t%s\t%s\t%f\n" % (input_data['region'][0], i, timeslice, j, demand_profile)
outPutFile = outPutFile[:startIndex] + dataToInsert + outPutFile[startIndex:]
return(outPutFile)
def capacityfactor(outPutFile, df, battery, input_data, capacityfactor_wind, capacityfactor_solar):
"""
    builds the CapacityFactor(Region, Technology, Timeslice, Year, CapacityFactor)
    This method is for capacity factors which do not use storage equations but still model batteries
    -------------
    Arguments
    outPutFile, df, battery, input_data, capacityfactor_wind, capacityfactor_solar
    outPutFile: is a string containing the OSeMOSYS parameters file
    df: is the location specific data which is used to iterate the data
    battery: contains meta data on technologies for which you want to model batteries, time and capacityfactor
    input_data: contains meta data such as region, start year, end year, months, timeslices
    capacityfactor_wind, capacityfactor_solar: hourly renewables.ninja capacity factor time series per location
"""
print("Capacity factor", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
param = "param CapacityFactor default 1 :=\n"
startIndex = outPutFile.index(param) + len(param)
dataToInsert = ""
#read input data
def convert(value):
try:
return int(value)
except ValueError:
try:
return float(value)
except ValueError:
return value
month = (input_data['Month']) #the list includes Nan
mon = [convert(value) for value in month]
mont = [x for x in mon if str(x) != 'nan']
months = ['{:02d}'.format(x) for x in mont] #padding numbers
timeslice = input_data['Timeslice']
timesliceDN = str(input_data['timesliceDN'][0])
timesliceDE = str(input_data['timesliceDE'][0])
timesliceED = str(input_data['timesliceED'][0])
timesliceEN = str(input_data['timesliceEN'][0])
timesliceNE = str(input_data['timesliceNE'][0])
timesliceND = str(input_data['timesliceND'][0])
region = input_data['region'][0]
startyear = input_data['startyear'][0]
endyear = input_data['endyear'][0]
type = input_data.groupby('renewable ninjafile')
solar = type.get_group('capacityfactor_solar')
solar_tech = solar['Tech']
wind = type.get_group('capacityfactor_wind')
wind_tech = wind['Tech']
#deep copy renewable ninja data
capacityfactor_solar_ = capacityfactor_solar.copy()
capacityfactor_solar_p = pd.to_datetime(capacityfactor_solar_['date'], errors='coerce', format='%Y/%m/%d %H:%M')
capacityfactor_solar_.index = capacityfactor_solar_p
capacityfactor_solar_pv = capacityfactor_solar_.drop(columns=['date'])
for k, row in df.iterrows():
location = str(row['Location'])
year = startyear
while year <= endyear:
m = 0
while m < 11:
currentMonth = months[m]
startDate = "2016-%s-01" % (currentMonth)
endDate = "2016-%s-01" % (months[m + 1])
thisMonthOnly = capacityfactor_solar_pv.loc[startDate:endDate]
sliceStart = timesliceDN
sliceEnd = timesliceDE
ts = "%iD" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = ((slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
for t in solar_tech:
dataToInsert += "%s\t%s_%s\t%s\t%i\t%f\n" % (region, t, location , ts, year, average_solar)
sliceStart = timesliceED
sliceEnd = timesliceEN
ts = "%iE" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (
(slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
for t in solar_tech:
dataToInsert += "%s\t%s_%s\t%s\t%i\t%f\n" % (region,t, location, ts, year, average_solar)
sliceStart = timesliceNE
sliceEnd = timesliceND
ts = "%iN" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (
(slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
for t in solar_tech:
dataToInsert += "%s\t%s_%s\t%s\t%i\t%f\n" % (region,t, location, ts, year, average_solar)
m = m + 1
while m == 11:
currentMonth = months[m]
startDate = "2016-%s-01" % (currentMonth)
endDate = "2016-%s-31" % (months[m])
thisMonthOnly = capacityfactor_solar_pv.loc[startDate:endDate]
sliceStart = timesliceDN
sliceEnd = timesliceDE
ts = "%iD" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (
(slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
for t in solar_tech:
dataToInsert += "%s\t%s_%s\t%s\t%i\t%f\n" % (region,t, location, ts, year, average_solar)
sliceStart = timesliceED
sliceEnd = timesliceEN
ts = "%iE" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values))
except ZeroDivisionError:
average_solar = 0
for t in solar_tech:
dataToInsert += "%s\t%s_%s\t%s\t%i\t%f\n" % (region,t, location, ts, year, average_solar)
sliceStart = timesliceNE
sliceEnd = timesliceND
ts = "%iN" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (
(slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
for t in solar_tech:
dataToInsert += "%s\t%s_%s\t%s\t%i\t%f\n" % (region,t, location, ts, year, average_solar)
m = m + 1
year = year + 1
if battery is None:
pass
else:
tech = battery.groupby('renewable ninjafile')
solar_battery = tech.get_group('capacityfactor_solar')
for j, line in solar_battery.iterrows():
capacityfactor_solar_batt = capacityfactor_solar.copy() # deep copy
for k, row in df.iterrows():
location = str(row['Location'])
batteryCapacityFactor = line['Batterycapacityfactor']
batteryTime = line['BatteryTime']
lastRowWasZero = False
batteryConsumed = False
index = 0
for solarCapacity in capacityfactor_solar_batt[location].values:
currentRowIsZero = solarCapacity == 0
if not currentRowIsZero:
# This will happen when the current row is not zero. We should "reset" everything.
batteryTime = line['BatteryTime']
batteryCapacityFactor = line['Batterycapacityfactor']
batteryConsumed = False
lastRowWasZero = False
elif batteryTime == int(0):
# This will happen when the current value is 0, the last value was zero and there is no batterytime left.
batteryConsumed = True
batteryTime = line['BatteryTime']
batteryCapacityFactor = line['Batterycapacityfactor']
elif solarCapacity == 0 and lastRowWasZero and not batteryConsumed:
# This will happen when the last row was zero and the current row is 0.
capacityfactor_solar_batt.at[index, location] = batteryCapacityFactor
lastRowWasZero = True
batteryTime -= 1
elif not batteryConsumed:
                        # This will happen when the last row was not zero and the current row is 0; start drawing down the battery.
capacityfactor_solar_batt.at[index, location] = batteryCapacityFactor
lastRowWasZero = True
batteryTime -= 1
index += 1
capacityfactor_solar_b = capacityfactor_solar_batt.copy()
capacityfactor_solar_b.index = capacityfactor_solar_p
capacityfactor_solar_battery = capacityfactor_solar_b.drop(columns=['date'])
year = startyear
while year <= endyear:
m = 0
while m < 11:
currentMonth = months[m]
startDate = "2016-%s-01" % (currentMonth)
endDate = "2016-%s-01" % (months[m + 1])
thisMonthOnly = capacityfactor_solar_battery.loc[startDate:endDate]
sliceStart = timesliceDN
sliceEnd = timesliceDE
ts = "%iD" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = ((slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
dataToInsert += "%s\t%s_%ih_%s\t%s\t%i\t%f\n" % (region, line['Technology'], line['BatteryTime'], location, ts, year, average_solar)
sliceStart = timesliceED
sliceEnd = timesliceEN
ts = "%iE" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (
(slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
dataToInsert += "%s\t%s_%ih_%s\t%s\t%i\t%f\n" % (region, line['Technology'], line['BatteryTime'], location, ts, year, average_solar)
sliceStart = timesliceNE
sliceEnd = timesliceND
ts = "%iN" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (
(slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
dataToInsert += "%s\t%s_%ih_%s\t%s\t%i\t%f\n" % (region, line['Technology'], line['BatteryTime'], location, ts, year, average_solar)
m = m + 1
while m == 11:
currentMonth = months[m]
startDate = "2016-%s-01" % (currentMonth)
endDate = "2016-%s-31" % (months[m])
thisMonthOnly = capacityfactor_solar_battery.loc[startDate:endDate]
sliceStart = timesliceDN
sliceEnd = timesliceDE
ts = "%iD" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (
(slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
dataToInsert += "%s\t%s_%ih_%s\t%s\t%i\t%f\n" % (region, line['Technology'], line['BatteryTime'], location, ts, year, average_solar)
sliceStart = timesliceED
sliceEnd = timesliceEN
ts = "%iE" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values))
except ZeroDivisionError:
average_solar = 0
dataToInsert += "%s\t%s_%ih_%s\t%s\t%i\t%f\n" % (region, line['Technology'], line['BatteryTime'], location, ts, year, average_solar)
sliceStart = timesliceNE
sliceEnd = timesliceND
ts = "%iN" % (m + 1)
slice = sum(thisMonthOnly[(location)].between_time(sliceStart, sliceEnd))
try:
average_solar = (
(slice / len(thisMonthOnly.between_time(sliceStart, sliceEnd)._values)))
except ZeroDivisionError:
average_solar = 0
dataToInsert += "%s\t%s_%ih_%s\t%s\t%i\t%f\n" % (region, line['Technology'], line['BatteryTime'], location, ts, year, average_solar)
m = m + 1
year = year + 1
#WIND
capacityfactor_windcop = capacityfactor_wind.copy()
capacityfactor_windc = | pd.to_datetime(capacityfactor_windcop['date'], errors='coerce', format='%Y/%m/%d %H:%M') | pandas.to_datetime |
import re
import string
from math import ceil
from operator import itemgetter
from random import randrange
import lime
import lime.lime_tabular
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import shap
from gensim.corpora import Dictionary
from gensim.models import CoherenceModel, nmf
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import (
ENGLISH_STOP_WORDS,
CountVectorizer,
TfidfVectorizer,
)
from sklearn.linear_model import ElasticNet, LogisticRegression, SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import (
accuracy_score,
mean_squared_error,
median_absolute_error,
precision_score,
r2_score,
recall_score,
roc_auc_score,
f1_score,
)
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from datto.CleanText import CleanText
class ModelResults:
def most_similar_texts(
self, X, num_examples, text_column_name, num_topics=None, chosen_stopwords=set()
):
"""
Uses NMF clustering to create n topics based on adjusted word frequencies
Parameters
--------
X: DataFrame
num_examples: int
text_column_name: str
num_topics: int
Optional - if none algorithm will determine best number
Returns
--------
topic_words_df: DataFrame
Top 15 words/phrases per topic
combined_df: DataFrame
Original text with topic number assigned to each
"""
X = X[~X[text_column_name].isna()]
X = X[X[text_column_name] != ""]
X = X[X[text_column_name] != " "]
X = X[X[text_column_name] != "NA"]
X = X[X[text_column_name] != "n/a"]
X = X[X[text_column_name] != "N/A"]
X = X[X[text_column_name] != "na"]
all_stop_words = (
set(ENGLISH_STOP_WORDS)
| set(["-PRON-"])
| set(string.punctuation)
| set([" "])
| chosen_stopwords
)
ct = CleanText()
vectorizer = TfidfVectorizer(
tokenizer=ct.lematize,
ngram_range=(1, 3),
stop_words=all_stop_words,
min_df=5,
max_df=0.4,
)
vectors = vectorizer.fit_transform(X[text_column_name]).todense()
# Adding words/phrases used in text data frequencies back into the dataset (so we can see feature importances later)
vocab = vectorizer.get_feature_names()
vector_df = pd.DataFrame(vectors, columns=vocab, index=X.index)
if X.shape[0] < 20:
return "Too few examples to categorize."
if not num_topics:
# In case 1, add 1 to get at least 2
# The rest are based on eyeballing numbers
min_topics = ceil(X.shape[0] * 0.01) + 1
max_topics = ceil(X.shape[0] * 0.2)
step = ceil((max_topics - min_topics) / 5)
topic_nums = list(np.arange(min_topics, max_topics, step))
texts = X[text_column_name].apply(ct.lematize)
# In gensim a dictionary is a mapping between words and their integer id
dictionary = Dictionary(texts)
# Filter out extremes to limit the number of features
dictionary.filter_extremes(no_below=2, no_above=0.85, keep_n=5000)
# Create the bag-of-words format (list of (token_id, token_count))
corpus = [dictionary.doc2bow(text) for text in texts]
coherence_scores = []
for num in topic_nums:
model = nmf.Nmf(
corpus=corpus,
num_topics=num,
id2word=dictionary,
chunksize=2000,
passes=5,
kappa=0.1,
minimum_probability=0.01,
w_max_iter=300,
w_stop_condition=0.0001,
h_max_iter=100,
h_stop_condition=0.001,
eval_every=10,
normalize=True,
random_state=42,
)
cm = CoherenceModel(
model=model, texts=texts, dictionary=dictionary, coherence="u_mass"
)
coherence_scores.append(round(cm.get_coherence(), 5))
scores = list(zip(topic_nums, coherence_scores))
chosen_num_topics = sorted(scores, key=itemgetter(1), reverse=True)[0][0]
else:
chosen_num_topics = num_topics
model = NMF(n_components=chosen_num_topics, random_state=42)
model.fit(vectors)
component_loadings = model.transform(vectors)
top_topics = pd.DataFrame(
np.argmax(component_loadings, axis=1), columns=["top_topic_num"]
)
top_topic_loading = pd.DataFrame(
np.max(component_loadings, axis=1), columns=["top_topic_loading"]
)
X.reset_index(inplace=True, drop=False)
vector_df.reset_index(inplace=True, drop=True)
# Fix for duplicate text_column_name
vector_df.columns = [x + "_vector" for x in vector_df.columns]
combined_df = pd.concat([X, vector_df, top_topics, top_topic_loading], axis=1)
combined_df.sort_values(by="top_topic_loading", ascending=False, inplace=True)
combined_df = pd.concat([X, vector_df, top_topics], axis=1)
topic_words = {}
sample_texts_lst = []
for topic, comp in enumerate(model.components_):
word_idx = np.argsort(comp)[::-1][:num_examples]
topic_words[topic] = [vocab[i] for i in word_idx]
sample_texts_lst.append(
list(
combined_df[combined_df["top_topic_num"] == topic][
text_column_name
].values[:num_examples]
)
)
topic_words_df = pd.DataFrame(
columns=[
"topic_num",
"num_in_category",
"top_words_and_phrases",
"sample_texts",
]
)
topic_words_df["topic_num"] = [k for k, _ in topic_words.items()]
topic_words_df["num_in_category"] = (
combined_df.groupby("top_topic_num").count().iloc[:, 0]
)
topic_words_df["top_words_and_phrases"] = [x for x in topic_words.values()]
topic_words_df["sample_texts"] = sample_texts_lst
topic_words_explode = pd.DataFrame(
topic_words_df["sample_texts"].tolist(), index=topic_words_df.index,
)
topic_words_explode.columns = [
"example{}".format(num) for num in range(len(topic_words_explode.columns))
]
concated_topics = pd.concat(
[
topic_words_df[
["topic_num", "num_in_category", "top_words_and_phrases"]
],
topic_words_explode,
],
axis=1,
)
print("Topics created with top words & example texts:")
print(concated_topics)
original_plus_topics = combined_df[list(X.columns) + ["index", "top_topic_num"]]
original_with_keywords = pd.merge(
original_plus_topics,
concated_topics[["topic_num", "top_words_and_phrases"]],
left_on="top_topic_num",
right_on="topic_num",
how="left",
).drop("top_topic_num", axis=1)
return (
concated_topics,
original_with_keywords,
model,
)
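    # A hedged usage sketch for most_similar_texts (the DataFrame and column name are hypothetical):
    #   mr = ModelResults()
    #   topics_df, texts_with_keywords, nmf_model = mr.most_similar_texts(
    #       feedback_df, num_examples=5, text_column_name="feedback_text", num_topics=None
    #   )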
def coefficients_graph(self, X_train, X_test, model, model_type, filename):
"""
Displays graph of feature importances.
* Number of horizontal axis indicates magnitude of effect on
target variable (e.g. affected by 0.25)
* Red/blue indicates feature value (increasing or decreasing feature
has _ effect)
* Blue & red mixed together indicate there isn't a clear
effect on the target variable
* For classification - interpreting magnitude number / x axis - changes the
predicted probability of y on average by _ percentage points (axis value * 100)
Parameters
--------
X_train: pd.DataFrame
X_test: pd.DataFrame
model: fit model object
model_type: str
'classification' or 'regression'
filename: str
"""
if model_type.lower() == "classification":
f = lambda x: model.predict_proba(x)[:, 1]
else:
f = lambda x: model.predict(x)
med = X_train.median().values.reshape((1, X_train.shape[1]))
explainer = shap.KernelExplainer(f, med)
# Runs too slow if X_test is huge, take a representative sample
if X_test.shape[0] > 1000:
X_test_sample = X_test.sample(1000)
else:
X_test_sample = X_test
shap_values = explainer.shap_values(X_test_sample)
shap.summary_plot(shap_values, X_test_sample)
plt.tight_layout()
plt.savefig(filename)
return shap_values
def most_common_words_by_group(
self, X, text_col_name, group_col_name, num_examples, num_times_min, min_ngram,
):
"""
Get the most commons phrases for defined groups.
Parameters
--------
X: DataFrame
text_col_name: str
group_col_name: str
num_examples: int
Number of text examples to include per group
num_times_min: int
Minimum number of times word/phrase must appear in texts
min_ngram: int
Returns
--------
overall_counts_df: DataFrame
Has groups, top words, and counts
"""
# Fix for when column name is the same as an ngram column name
X["group_column"] = X[group_col_name]
# Remove all other unneeded columns
X = X[[text_col_name, "group_column"]]
all_stop_words = (
set(ENGLISH_STOP_WORDS)
| set(["-PRON-"])
| set(string.punctuation)
| set([" "])
)
cv = CountVectorizer(
stop_words=all_stop_words,
ngram_range=(min_ngram, 3),
min_df=num_times_min,
max_df=0.4,
)
vectors = cv.fit_transform(X[text_col_name]).todense()
words = cv.get_feature_names()
vectors_df = pd.DataFrame(vectors, columns=words)
group_plus_vectors = pd.concat([vectors_df, X.reset_index(drop=False)], axis=1)
count_words = pd.DataFrame(
group_plus_vectors.groupby("group_column").count()["index"]
)
count_words = count_words.loc[:, ~count_words.columns.duplicated()]
# Fix for when "count" is an ngram column
count_words.columns = ["count_ngrams"]
group_plus_vectors = group_plus_vectors.merge(
count_words, on="group_column", how="left"
)
group_plus_vectors["count_ngrams"].fillna(0, inplace=True)
sums_by_col = (
group_plus_vectors[
group_plus_vectors.columns[
~group_plus_vectors.columns.isin([text_col_name, "index",])
]
]
.groupby("group_column")
.sum()
)
sums_by_col.sort_values(by="count_ngrams", ascending=False, inplace=True)
sums_by_col.drop("count_ngrams", axis=1, inplace=True)
array_sums = np.array(sums_by_col)
sums_values_descending = -np.sort(-array_sums, axis=1)
sums_indices_descending = (-array_sums).argsort()
highest_sum = pd.DataFrame(sums_values_descending[:, 0])
highest_sum.columns = ["highest_sum"]
sums_by_col["highest_sum"] = highest_sum["highest_sum"].values
overall_counts_df = pd.DataFrame(columns=["group_name", "top_words_and_counts"])
i = 0
for row in sums_by_col.index:
dict_scores = {}
temp_df = pd.DataFrame(columns=["group_name", "top_words_and_counts"])
temp_df["group_name"] = [row]
top_columns = sums_by_col.columns[
sums_indices_descending[i][:num_examples]
].values
top_counts = sums_values_descending[i][:num_examples]
[dict_scores.update({x: y}) for x, y in zip(top_columns, top_counts)]
temp_df["top_words_and_counts"] = [dict_scores]
overall_counts_df = overall_counts_df.append([temp_df])
print(f"Group Name: {row}\n")
for k, v in dict_scores.items():
print(k, v)
print("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
i += 1
return overall_counts_df
def score_final_model(
self, model_type, X_test, y_test, trained_model, multiclass=False
):
"""
Score your model on the test dataset. Only run this once to get an idea of how your model will perform in realtime.
Run it after you have chosen your model & parameters to avoid problems with overfitting.
Parameters
--------
model_type: str
X_test: DataFrame
y_test: DataFrame
trained_model: sklearn model
multiclass: bool
Returns
--------
model: model
Fit model
y_predicted: array
"""
# Predict actual scores
y_predicted = trained_model.predict(X_test)
if multiclass:
pscore = precision_score(y_test, y_predicted, average="weighted")
rscore = recall_score(y_test, y_predicted, average="weighted")
f1score = f1_score(y_test, y_predicted, average="weighted")
print(f"Final Model Precision Weighted: {pscore}")
print(f"Final Model Recall Weighted: {rscore}")
print(f"Final Model F1 Weighted: {f1score}")
elif model_type.lower() == "classification":
pscore = precision_score(y_test, y_predicted)
rscore = recall_score(y_test, y_predicted)
accuracy = accuracy_score(y_test, y_predicted)
roc_auc = roc_auc_score(y_test, y_predicted)
print(f"Final Model Precision: {pscore}")
print(f"Final Model Recall: {rscore}")
print(f"Final Model Accuracy: {accuracy}")
print(f"Final Model ROC AUC: {roc_auc}")
crosstab = pd.crosstab(
y_test, y_predicted, rownames=["Actual"], colnames=["Predicted"],
)
print(crosstab)
sum_crosstab = crosstab.to_numpy().sum()
print(
pd.crosstab(
y_test, y_predicted, rownames=["Actual"], colnames=["Predicted"],
).apply(lambda r: round(r / sum_crosstab, 3))
)
else:
mse = mean_squared_error(y_test, y_predicted)
mae = median_absolute_error(y_test, y_predicted)
r2 = r2_score(y_test, y_predicted)
print(f"Mean Negative Root Mean Squared Errror: {(mse ** 5) * -1}")
print(f"Mean Negative Median Absolute Error: {mae * -1}")
print(f"Mean R2: {r2}")
return trained_model, y_predicted
def coefficients_summary(
self,
X,
y,
num_repetitions,
num_coefficients,
model_type,
multiclass=False,
params={},
):
"""
Prints average coefficient values using a regression model.
Parameters
--------
X: DataFrame
y: DataFrame
num_repetitions: int
Number of times to create models
num_coefficients: int
Number of top coefficients to display
model_type: str
'classification' or 'regression'
multiclass: bool
params: dict
Optional - add to change regression params, otherwise use default
Returns
--------
simplified_df: DataFrame
Has mean, median, and standard deviation for coefficients after several runs
"""
coefficients_df = pd.DataFrame(columns=["features", "coefficients"])
X["intercept"] = 1
for _ in range(num_repetitions):
if multiclass:
model = DecisionTreeClassifier()
elif model_type.lower() == "classification":
model = LogisticRegression(fit_intercept=False, **params)
else:
model = ElasticNet(fit_intercept=False, **params)
X_train, _, y_train, _ = train_test_split(X, y)
model.fit(X_train, y_train)
if multiclass:
coefs = model.feature_importances_
elif model_type.lower() == "classification":
coefs = model.coef_[0]
else:
coefs = model.coef_
temp_df = pd.DataFrame(
[x for x in zip(X_train.columns, coefs)],
columns=["features", "coefficients"],
)
coefficients_df = coefficients_df.append(temp_df)
column_of_interest = "coefficients"
summary_coefficients_df = pd.DataFrame(
coefficients_df.groupby("features").agg(
{column_of_interest: ["mean", "std", "median"]}
)
)
summary_coefficients_df.columns = ["mean", "std", "median"]
summary_coefficients_df.reset_index(inplace=True)
value_counts_df = pd.DataFrame(columns=["features"])
for col in X_train.columns:
temp_df = pd.DataFrame([[col,]], columns=["features"],)
value_counts_df = value_counts_df.append(temp_df)
value_counts_df.reset_index(inplace=True, drop=True)
combined_df = summary_coefficients_df.merge(
value_counts_df, on="features", how="left"
)
combined_df["abs_val_mean"] = abs(combined_df["mean"])
combined_df["abs_val_to_se"] = abs(combined_df["mean"]) / combined_df["std"]
combined_df.sort_values("abs_val_to_se", inplace=True, ascending=False)
simplified_df = (
combined_df[["features", "mean", "std", "median", "abs_val_to_se"]]
.head(num_coefficients)
.round(7)
)
# 7 is the most before it flips back to scientific notation
print("Coefficients summary (descending by mean abs se value):")
print(simplified_df)
return simplified_df
def coefficients_individual_predictions(
self,
trained_model,
X_train,
X_test,
id_col,
num_samples,
model_type,
class_names=["False", "True"],
):
def model_preds_adjusted(data):
if model_type.lower() == "classification":
predictions = np.array(trained_model.predict_proba(data))
else:
predictions = np.array(trained_model.predict(data))
return predictions
if model_type.lower() == "classification":
explainer = lime.lime_tabular.LimeTabularExplainer(
np.array(X_train),
feature_names=X_train.columns,
class_names=class_names,
mode="classification",
)
else:
explainer = lime.lime_tabular.LimeTabularExplainer(
np.array(X_train),
feature_names=X_train.columns,
class_names=class_names,
mode="regression",
)
for i in range(num_samples):
user_idx = randrange(X_test.shape[0])
exp = explainer.explain_instance(
np.array(X_test.iloc[user_idx]),
model_preds_adjusted,
num_features=50,
top_labels=10,
)
user_id = X_test.iloc[user_idx][id_col]
print(f"\nUser: {user_id}")
if model_type.lower() == "classification":
prediction = class_names[
trained_model.predict_proba(
| pd.DataFrame(X_test.iloc[user_idx]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from pandas.tseries.offsets import *
import scipy.optimize as opt
import scipy.cluster.hierarchy as sch
from scipy import stats
class FHBacktestAncilliaryFunctions(object):
"""
This class contains a set of ancilliary supporting functions for performing backtests.
They are all static methods meant to be used some place else.
Here is the current list of methods:
resample_dates : for calculating rebalancing dates based on Pandas calendar sampling
expand_static_weights : to transfor static weights series in a dataframe of constant weights over some time index
get_cov_matrix_on_date : calculates covariance matrices
static_weights : static non-negative weights (long-only) for a given weighting scheme
"""
@staticmethod
def resample_dates(index, rebalance):
""" This function resamples index (a DatetimeIndex) in a few different ways:
1. Using the double-letter codes below:
# WW weekly frequency (on Wednesdays)
# WF weekly frequency (on Fridays)
# WM weekly frequency (on Mondays)
# ME month end frequency (last day of the month)
# MM mid-month frequency (10th business days of the month)
# MS month start frequency (first day of the month)
# QE quarter end frequency
# QM quarter end mid-month frequency (10th business days of the end of quarter month)
# QS quarter start frequency (first day of the quarter)
# SE semester end frequency
# SM semester end mid-month frequency (15th of June and 15th of December)
# SS semester start frequency
# YE year end frequency
# YM year end mid-month frequency (15th of December)
# YS year start frequency (first day of the year)
2. Given a custom list or DatetimeIndex with custom rebalancing dates
3. Given a list of months to rebalance as in [2,4,8] for rebalancing in Feb, Apr, and Aug
If the function fails to recognize the resampling method, it will assume ME"""
if isinstance(rebalance, str):
if (rebalance[0] == 'W' and len(rebalance) > 1) or rebalance == 'W':
wd = int(2 * (rebalance[1] == 'W') + 4 * (rebalance[1] == 'F')) if len(rebalance) > 1 else None
rebc = pd.to_datetime([x for x in (index + Week(1, weekday=wd)).unique()])
elif rebalance == 'ME' or rebalance == 'M':
rebc = pd.to_datetime((index + BMonthEnd(1)).unique())
elif rebalance == 'MM':
rebc = pd.to_datetime((index + MonthBegin(0) + BusinessDay(10)).unique())
elif rebalance == 'MS':
# TODO: This is taking the last business day of the month if index + MonthBegin(0) fall on a weekend. Fix this.
rebc = pd.to_datetime((index + MonthBegin(0)).unique())
elif rebalance == 'QE' or rebalance == 'Q':
rebc = pd.to_datetime((index + QuarterEnd(1)).unique())
elif rebalance == 'QM':
rebc = pd.to_datetime((index + QuarterBegin(0) + BusinessDay(10)).unique())
elif rebalance == 'QS':
# TODO: This is taking the last business day of the quarter if index + QuarterBegin(0) fall on a weekend. Fix this.
rebc = pd.to_datetime((index + QuarterBegin(0)).unique())
elif rebalance == 'SE' or rebalance == 'S':
rebc = pd.to_datetime([x for x in (index + BMonthEnd(1)).unique() if x.month in [6, 12]])
elif rebalance == 'SM':
rebc = pd.to_datetime(
[x for x in (index + MonthBegin(0) + BusinessDay(10)).unique() if x.month in [6, 12]])
elif rebalance == 'SS':
# TODO: This is taking the last business day of the semester if index + MonthBegin(0) fall on a weekend. Fix this.
rebc = pd.to_datetime([x for x in (index + MonthBegin(0)).unique() if x.month in [6, 12]])
elif rebalance == 'YE' or rebalance == 'Y':
rebc = pd.to_datetime((index + BYearEnd(1)).unique())
elif rebalance == 'YM':
rebc = pd.to_datetime((index + BYearBegin(0) + BusinessDay(10)).unique())
elif rebalance == 'YS':
# TODO: This is taking the last business day of the semester if index + MonthBegin(0) fall on a weekend. Fix this.
rebc = pd.to_datetime((index + BYearBegin(0)).unique())
else:
print('rebalance string not recognized, assuming month end frequency (last day of the month)')
rebc = pd.to_datetime((index + BMonthEnd(1)).unique())
elif isinstance(rebalance, list):
if all(isinstance(x, type(index[0])) for x in
rebalance): # if the list or DatetimeIndex contains actual dates
rebc = pd.to_datetime(rebalance)
else:
# this will work if the user provided a list with months to rebalance month end frequency (last day of the month
try:
rebc = pd.to_datetime([x for x in (index + BMonthEnd(1)).unique() if x.month in rebalance])
except: # last resort for user provided list
print('Invalid rebalance list, assuming month end frequency (last day of the month)')
rebc = pd.to_datetime((index + BMonthEnd(1)).unique())
else:
print('rebalance parameter not recognized, assuming month end frequency (last day of the month)')
rebc = pd.to_datetime((index + BMonthEnd(1)).unique())
# not necessarily the rebalancing days are valid days, i.e., are in index
# we need to take the rebalancing days that are not in index and alter them
# we alter them to the closest possible date in index
# rebalancing days that are not in index
rebc.freq = None
notin = pd.DatetimeIndex([x for x in rebc if x not in index and x < index.max()],
dtype=rebc.dtype, freq=rebc.freq)
# find the closest day in index
if isinstance(rebalance, str) and len(rebalance) == 2 and rebalance[1] == 'S':
# when dealing with start frequency we want to find the next valid day not only the closest
next_index_day = lambda x: min([d for d in index if d >= x])
alter = [next_index_day(p) for p in notin]
else:
# for the other cases, we just find the closest date
alter = [min(index, key=lambda x: abs(x - p)) for p in notin]
notin = notin.append(pd.DatetimeIndex([x for x in rebc if x > index.max()], dtype=rebc.dtype, freq=rebc.freq))
alter = pd.DatetimeIndex(alter, dtype=rebc.dtype, freq=rebc.freq)
reb = rebc.drop(notin) # drop the invalid rebalancing dates
reb = reb.append(alter) # add the closest days in index
reb = reb.sort_values() # reorder
return reb
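    # Hedged usage sketch (added for illustration; not part of the original source, and the variable
    # names below are made up, assuming resample_dates is exposed as a static method like its siblings):
    #   bdays = pd.bdate_range('2010-01-01', '2015-12-31')
    #   month_ends = FHBacktestAncilliaryFunctions.resample_dates(bdays, 'ME')    # last bday of each month
    #   fridays = FHBacktestAncilliaryFunctions.resample_dates(bdays, 'WF')       # weekly rebalancing on Fridays
    #   mar_sep = FHBacktestAncilliaryFunctions.resample_dates(bdays, [3, 9])     # March and September month-ends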
@staticmethod
def expand_static_weights(dates_to_expand, weights):
""""
This function transforms static weights in a dataframe of constant weights having dates_to_expand as index
"""
w_df = pd.DataFrame(index=dates_to_expand,
columns=weights.index,
data=np.tile(weights.values, [len(dates_to_expand), 1]))
return w_df
@staticmethod
def get_cov_matrix_on_date(d, ts, h=21, cov_type='rolling', cov_window=756, halflife=60, shrinkage_parameter = 1):
"""
        This function calculates the annualized covariance matrix as of a given date d
It does a few things that are important for backtests that are not done by pandas cov functions
        1. It will take the unconditional cov matrix if there is too little data (less than cov_window bdays).
           This avoids using covariance estimates based on just a few datapoints in the backtest
        2. The DataFrame ts can have time series starting at different points in time.
           For series with too little history, the corresponding rows/columns fall back to the unconditional covariance estimates
Parameters
----------
        d : a single date value of the same type as the dates in ts.index
ts : a DataFrame with daily time series of index/price levels (not returns!)
h : this is the number of bdays used to calculate returns for estimating the covariance matrix
        cov_type : is a string with the type of covariance calculation; it can be:
1. rolling (default) : is a rolling window of size cov_window (default is 3 years of data)
2. ewma : is a ewma cov (default halflife is 60 bdays)
3. expanding : is an expanding window from the start of each series
Returns
-------
an annualized covariance matrix based on h period returns
"""
# clean up
ts = ts.astype(float)
ts.index = pd.DatetimeIndex(pd.to_datetime(ts.index))
r = max([x for x in ts.index if x <= pd.to_datetime(d)])
t0 = ts.index[0] # this is when the data starts
unc_cov = np.log(ts).diff(h).cov() * (252 / h) # this is the unconditional covariance matrix annualized
# if the dataframe has less than certain amount of data, use the unconditional covariance matrix
if (r - t0).days < cov_window:
cov = unc_cov.copy()
# if the ts DataFrame has at least some amount of data, use the conditional cov
else:
            past_data = ts.shift(1).loc[:r]  # note the one-day lag to avoid using information not available in the backtest
if cov_type == 'expanding':
cond_cov = np.log(past_data).diff(h).cov() * (252 / h)
elif cov_type == 'ewma':
# This is roughly similar to a GARCH(1, 1) model:
cond_cov = (np.log(past_data).diff(1).ewm(halflife=halflife).cov().loc[r]) * 252
else:
if cov_type != 'rolling':
print('cov_type not recognized, assuming rolling window of %s bdays' % str(cov_window))
cond_cov = np.log(past_data.iloc[-cov_window:]).diff(h).cov() * (252 / h)
            count_past = past_data.count()  # this counts how much data is available for each series
# take the series that do not have enough data and replace with unconditional estimates
for x in count_past[count_past <= cov_window].index:
cond_cov.loc[x, :] = unc_cov.loc[x, :].values
cond_cov.loc[:, x] = unc_cov.loc[:, x].values
cov = cond_cov.copy()
        if 0 <= shrinkage_parameter < 1:
vols = pd.Series(index=cov.index,data=np.sqrt(np.diag(cov)))
corr = cov.div(vols, axis=0).div(vols, axis=1)
corr = shrinkage_parameter * corr + (1 - shrinkage_parameter) * np.eye(len(vols))
cov = corr.multiply(vols, axis=0).multiply(vols, axis=1).copy()
return cov
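    # Hedged usage sketch (added for illustration; not part of the original source): 'trackers' is an
    # assumed DataFrame of daily index levels. This would estimate the annualized covariance as of
    # 2020-12-31 from overlapping 21-bday returns over a rolling 3-year window:
    #   cov = FHBacktestAncilliaryFunctions.get_cov_matrix_on_date('2020-12-31', trackers, h=21,
    #                                                              cov_type='rolling', cov_window=756)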
@staticmethod
def static_weights(weighting_scheme, cov=None, vol_target=0.1):
"""
This method calculates static non-negative weights for a given weighting scheme
This method largely makes the functions in portfolio/construction.py obsolete
Parameters
----------
weighting_scheme : this is a string that can take the following values
'IVP' : Inverse Volatility Portfolio
'MVR' : Minimum Variance Portfolio
'ERC' : Equal Risk Contribution Portfolio
'HRP' : Hierarchical Risk Parity from L<NAME> (2016) in the Journal of Portfolio Management
'EW' : Equal weights (this is the fall back case if the string is not recognized)
cov : a DataFrame with the covariance matrix used in all weighting schemes but equal weights
vol_target : only used in the Equal Risk Contribution Portfolio to set the overall volatility of the portfolio
Returns
-------
a Pandas series with static non-negative weights (long-only)
"""
        assert isinstance(cov, pd.DataFrame), "input 'cov' must be a pandas DataFrame"
# Inverse Volatility Portfolio
if weighting_scheme == 'IVP':
# non-negative weights are set to be proportional to the inverse of the vol, adding up to one
w = np.sqrt(np.diag(cov))
w = 1 / w
w = w / w.sum()
static_weights = pd.Series(data=w, index=cov.columns)
# Minimum Variance Portfolio
elif weighting_scheme == 'MVR':
# non-negative weights are set to minimize the overall portfolio variance
n = cov.shape[0]
port_variance = lambda x: x.dot(cov).dot(x)
eq_cons = {'type': 'eq', 'fun': lambda w: w.sum() - 1}
bounds = opt.Bounds(0, np.inf)
w0 = np.ones(n) / n
res = opt.basinhopping(port_variance, w0, minimizer_kwargs={'method': 'SLSQP',
'constraints': eq_cons, 'bounds': bounds},
T=1.0,
niter=500,
stepsize=0.5,
interval=50,
disp=False,
niter_success=100)
if not res['lowest_optimization_result']['success']:
raise ArithmeticError('Optimization convergence failed for static MVR weighting scheme')
static_weights = pd.Series(data=res.x, index=cov.columns)
# Equal Risk Contribution Portfolio
elif weighting_scheme == 'ERC':
# non-negative weights are set to for each component to have equal risk contribution
n = cov.shape[0]
target_risk_contribution = np.ones(n) / n
dist_to_target = lambda x: np.linalg.norm(x * (x @ cov / (vol_target ** 2)) - target_risk_contribution)
port_vol = lambda x: np.sqrt(x.dot(cov).dot(x))
eq_cons = {'type': 'eq', 'fun': lambda x: port_vol(x) - vol_target}
bounds = opt.Bounds(0, np.inf)
res = opt.basinhopping(dist_to_target, target_risk_contribution,
minimizer_kwargs={'method': 'SLSQP', 'constraints': eq_cons, 'bounds': bounds},
T=1.0,
niter=500,
stepsize=0.5,
interval=50,
disp=False,
niter_success=100)
if not res['lowest_optimization_result']['success']:
raise ArithmeticError('Optimization convergence failed for static ERC weighting scheme')
static_weights = pd.Series(data=res.x, index=cov.columns)
# Hierarchical Risk Parity
elif weighting_scheme == 'HRP':
# Idea is from <NAME> (2016) in the Journal of Portfolio Management
            # Code is from the book by <NAME> (2018), Advances in Financial Machine Learning, <NAME> & Sons
vols = np.sqrt(np.diag(cov))
corr = cov.div(vols, axis=0).div(vols, axis=1)
dist = np.sqrt(np.round(((1 - corr) / 2),10))
link = sch.linkage(dist)
# quasi diagonal
link = link.astype(int)
sort_ix = pd.Series([link[-1, 0], link[-1, 1]])
num_items = link[-1, 3]
while sort_ix.max() >= num_items:
sort_ix.index = range(0, sort_ix.shape[0] * 2, 2) # make space
df0 = sort_ix[sort_ix >= num_items] # find clusters
i = df0.index
j = df0.values - num_items
sort_ix[i] = link[j, 0] # item 1
df0 = pd.Series(link[j, 1], index=i + 1)
sort_ix = sort_ix.append(df0) # item 2
sort_ix = sort_ix.sort_index() # re-sort
sort_ix.index = range(sort_ix.shape[0]) # re-index
sort_ix = corr.index[sort_ix.tolist()].tolist()
static_weights = pd.Series(1, index=sort_ix)
c_items = [sort_ix] # initialize all items in one cluster
while len(c_items) > 0:
# bi-section
c_items = [i[j:k] for i in c_items for j, k in ((0, len(i) // 2), (len(i) // 2, len(i))) if
len(i) > 1]
for i in range(0, len(c_items), 2): # parse in pairs
c_items0 = c_items[i] # cluster 1
c_items1 = c_items[i + 1] # cluster 2
# get cluster var for 0
cov_ = cov.loc[c_items0, c_items0] # matrix slice
ivp = 1 / np.diag(cov_)
ivp /= ivp.sum()
w_ = ivp.reshape(-1, 1)
c_var0 = np.dot(np.dot(w_.T, cov_), w_)[0, 0]
# get cluster var for 1
cov_ = cov.loc[c_items1, c_items1] # matrix slice
ivp = 1 / np.diag(cov_)
ivp /= ivp.sum()
w_ = ivp.reshape(-1, 1)
c_var1 = np.dot(np.dot(w_.T, cov_), w_)[0, 0]
alpha = 1 - c_var0 / (c_var0 + c_var1)
static_weights[c_items0] *= alpha # weight 1
static_weights[c_items1] *= 1 - alpha # weight 2
else:
# Equal Weights
if weighting_scheme != 'EW':
print('%s weighting scheme is not recognized, defaulting to static equal weights' % weighting_scheme)
n = cov.shape[0]
static_weights = pd.Series(index=cov.index, data=1 / n)
return static_weights
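    # Hedged usage sketch (added for illustration; not part of the original source): with a covariance
    # matrix 'cov' such as the one produced by get_cov_matrix_on_date above, the long-only schemes
    # could be compared as follows.
    #   w_ivp = FHBacktestAncilliaryFunctions.static_weights('IVP', cov)
    #   w_erc = FHBacktestAncilliaryFunctions.static_weights('ERC', cov, vol_target=0.1)
    #   w_hrp = FHBacktestAncilliaryFunctions.static_weights('HRP', cov)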
@staticmethod
def cross_sectional_weights_from_signals(signals, weighting_scheme = 'rank', cov = None, vol_target = 0.1):
"""
This method calculates static long-short weights for a given set of signals
Parameters
----------
signals : a Pandas series containing a set of signals on which assets will be sorted. Typically, we want to
be long and have higher weight on assets with large signals and to be short and have large negative
weights in the assets with low signals
weighting_scheme : this is a string that can take the following values
'zscores' : z-score long-short weights adding up to 200% in absolute value
'winsorized' : same as 'zscores' but with z-scores winsorized at 10th/90th percentile limits
'vol_target' : long-short weights set to achieve a certain volatility target for the entire portfolio
'ERC' : Equal Risk Contribution Portfolio
'IVP' : Inverse Volatility Portfolio
'EW' : Equal Weights
'rank' : Signal Rank Based Portfolio (this is the case if the parameter is not given or not recognized)
cov : a DataFrame with the covariance matrix used in all weighting schemes but equal weights
vol_target : used in the 'vol_target' and 'ERC' weighting schemes to set the overall volatility of the portfolio
Returns
-------
a Pandas series with static long-short weights as type float
"""
assert isinstance(signals, pd.Series), "input 'signals' must be a pandas Series"
assert isinstance(weighting_scheme, str), "input 'weighting_scheme' must be a string"
if weighting_scheme.lower().find('zscores')>-1:
# z-score long-short weights adding up to 200% in absolute value
weights = signals.copy().fillna(0) * 0
scores = pd.Series(index=signals.dropna().index, data=stats.zscore(signals.dropna()))
weights[scores.index] = scores.values
weights = weights / (np.nansum(np.abs(weights)) / 2)
elif weighting_scheme.lower().find('winsorized')>-1:
# z-scores winsorized at 10th/90th percentile limits long-short weights adding up to 200%
weights = signals.copy().fillna(0) * 0
raw_scores = stats.zscore(signals.dropna())
w_scores = stats.mstats.winsorize(raw_scores, limits=.1)
scores = pd.Series(index=signals.dropna().index, data=w_scores)
weights[scores.index] = scores.values
weights = weights / (np.nansum(np.abs(weights)) / 2)
elif weighting_scheme.lower().find('vol_target')>-1:
# long-short weights set to achieve a certain volatility target for the entire portfolio
# maximize the portfolio signal (actually minimize the opposite of that)
port_signal = lambda x: - x.dot(signals.values)
# subject to the portfolio volatility being equal to vol_target
port_vol = lambda x: np.sqrt(x.dot(cov).dot(x)) - vol_target
eq_cons = {'type': 'eq', 'fun': lambda x: port_vol(x)}
# initialize optimization with rank-based portfolio
ranks = signals.rank()
w0 = ranks - ranks.mean()
w0 = w0 / (np.nansum(np.abs(w0)) / 2)
# bounds are set in order to be long/short what the rank based portfolio tells us to be long/short
# the maximum weight in absolute value is the maximum weight in the rank-based portfolio
bounds = pd.DataFrame(index=signals.index, columns=['lower', 'upper'])
bounds['lower'] = np.array([np.sign(w0) * max(np.abs(w0)), np.zeros(w0.shape)]).min(axis=0)
bounds['upper'] = np.array([np.sign(w0) * max(np.abs(w0)), np.zeros(w0.shape)]).max(axis=0)
res = opt.basinhopping(port_signal, np.nan_to_num(w0.values),
minimizer_kwargs={'method': 'SLSQP', 'constraints': eq_cons, 'bounds': bounds.values},
T=1.0,
niter=500,
stepsize=0.5,
interval=50,
disp=False,
niter_success=100)
if not res['lowest_optimization_result']['success']:
raise ArithmeticError('Optimization convergence failed for volatility target weighting scheme')
weights = pd.Series(index=signals.index, data = np.nan_to_num(res.x))
elif weighting_scheme.find('ERC')>-1:
# Equal Risk Contribution Portfolio
# minimize the distance to the equal risk portfolio
n = cov.shape[0]
target_risk_contribution = np.ones(n) / n
dist_to_target = lambda x: np.linalg.norm(x * (x @ cov / (vol_target ** 2)) - target_risk_contribution)
# subject to the portfolio volatility being equal to vol_target
port_vol = lambda x: np.sqrt(x.dot(cov).dot(x))
eq_cons = {'type': 'eq', 'fun': lambda x: port_vol(x) - vol_target}
# initialize optimization with rank-based portfolio
ranks = signals.rank()
w0 = ranks - ranks.mean()
w0 = w0 / (np.nansum(np.abs(w0)) / 2)
# bounds are set in order to be long/short what the rank based portfolio tells us to be long/short
# the maximum weight in absolute value is the maximum weight in the rank-based portfolio
bounds = pd.DataFrame(index=signals.index, columns=['lower', 'upper'])
bounds['lower'] = np.array([np.sign(w0) * max(np.abs(w0)), np.zeros(w0.shape)]).min(axis=0)
bounds['upper'] = np.array([np.sign(w0) * max(np.abs(w0)), np.zeros(w0.shape)]).max(axis=0)
res = opt.basinhopping(dist_to_target, target_risk_contribution,
minimizer_kwargs={'method': 'SLSQP', 'constraints': eq_cons, 'bounds': bounds.values},
T=1.0,
niter=500,
stepsize=0.5,
interval=50,
disp=False,
niter_success=100)
if not res['lowest_optimization_result']['success']:
raise ArithmeticError('Optimization convergence failed for ERC weighting scheme')
weights = pd.Series(index=signals.index, data=np.nan_to_num(res.x))
elif weighting_scheme.find('IVP')>-1:
# Inverse Volatility Portfolio
ranks = signals.rank()
weights = ranks - ranks.mean()
vols = pd.Series(index=cov.index, data=np.sqrt(np.diag(cov)))
weights = np.sign(weights) / vols
weights = weights / (np.nansum(np.abs(weights)) / 2)
elif weighting_scheme == 'EW':
# Equal Weights
ranks = signals.rank()
weights = ranks - ranks.mean()
weights = np.sign(weights) / signals.shape[0]
weights = weights / (np.nansum(np.abs(weights)) / 2)
else:
# Signal Rank Based Portfolio
if weighting_scheme.lower().find('rank')== -1:
print('Unclear weighting scheme, assuming signal-rank based weights')
ranks = signals.rank()
weights = ranks - ranks.mean()
weights = weights / (np.nansum(np.abs(weights)) / 2)
return weights.astype(float)
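    # Hedged usage sketch (added for illustration; not part of the original source): 'signals' is an
    # assumed pd.Series of cross-sectional scores indexed by asset name.
    #   w_rank = FHBacktestAncilliaryFunctions.cross_sectional_weights_from_signals(signals, 'rank')
    #   w_z = FHBacktestAncilliaryFunctions.cross_sectional_weights_from_signals(signals, 'zscores')
    # Both are long-short weight vectors whose absolute values sum to roughly 200%.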
class FHLongOnlyWeights(object):
"""
Implements long-only portfolio strategies
Attributes
----------
underlyings : a list or index with the name of the underlyings of the strategy
ts : a Pandas DataFrame containing the indexed time series of returns for a set of underlying trackers
rebalance_dates : DatetimeIndex with the rebalancing dates of the strategy
weights : a Pandas DataFrame containing the time series of notional allocation (weights) on each underlying tracker
for each rebalancing date
    holdings : a Pandas DataFrame containing the time series of the quantity held
on each underlying tracker on all dates
pnl : a Pandas Series containing the time series of the daily pnl of the strategy
backtest : a Pandas Series containing the time series of the indexed cumulative pnl of the strategy
Methods
----------
_rescale_weights : rescale the weights to achieve a certain objective when new assets come into the portfolio.
Current options are:
'to_one' : rescale weights add to one
'vol' : rescale weights to meet a certain volatility target
'notional' : weights are rescaled but keep the same notional as before
run_backtest : runs the strategy, calculating the performance and the attributes backtest, pnl and holdings
It also returns the backtest as a Pandas DataFrame
"""
def __init__(self, ts, DTINI='1997-12-31', DTEND='today', static = True,
weighting_scheme = 'IVP', rebalance='M', rescale_weights = False, vol_target = 0.1,
cov_type='rolling', cov_period=21, cov_window=756, halflife=60):
"""
This class implements long-only portfolio strategies.
Parameters
----------
ts : a Pandas DataFrame containing the indexed time series of returns for a set of trackers.
The time series do not need to start or end all at the same time and the code deals with missing data
DTINI : a string containing the initial date for the backtest (default is '1997-12-31')
DTEND : a string containing the end date for the backtest (default is 'today')
        static : a Boolean; True (the default) means the strategy uses static weights, False means dynamic weights
weighting_scheme : a string that defines the strategy weighting scheme to be used as argument on
the static_weights method in the FHBacktestAncilliaryFunctions class.
See FHBacktestAncilliaryFunctions.static_weights for different weighting scheme options.
Inverse Volatility Portfolio is the default weighting scheme.
rebalance : a string, list or DatetimeIndex that defines the rebalancing frequency of the strategy.
The string is used as argument on the resample_dates method in the FHBacktestAncilliaryFunctions
class. See FHBacktestAncilliaryFunctions.resample_dates for rebalancing options
Month-end rebalancing is the default rebalancing scheme.
rescale_weights : a Boolean or string that is used as an argument in the _rescale_weights method.
If True, or not recognized, the weights will be rescaled to add up to one when
new underlyings come into the portfolio.
Other options are described above in the _rescale_weights method description
vol_target : a float used in some weighting schemes to set the overall volatility of the portfolio
        cov_type : is a string with the type of covariance calculation. See the cov_type parameter on the
get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class.
The default is 'rolling'
cov_period : an integer used to calculate the rolling returns that will be used in the covariance calculation.
See the h parameter on the get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class.
The default is 21 business days, so 1 month of rolling returns
cov_window : an integer used to determine how far back in history to calculate the rolling returns that
will be used in the covariance calculation. See the cov_window parameter on the
get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class
The default is 756 business days, so 3 years of data
        halflife : for cov_type equal to 'ewma' a halflife parameter may be specified. See the halflife parameter
on the get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class.
The default is 60 bdays, about 3 months, if no parameter is specified
"""
assert isinstance(ts, pd.DataFrame), "input 'ts' must be a pandas DataFrame"
assert isinstance(rescale_weights, bool) or isinstance(rescale_weights, str),\
"input 'rescale_weights' must be boolean or string"
# store the names of the underlyings
self.underlyings = ts.columns
# fill na's and store time series data
ts = ts.copy().fillna(method='ffill').dropna(how='all')
ts.index = pd.DatetimeIndex(pd.to_datetime(ts.index))
relevant_time_period = pd.DatetimeIndex([t for t in ts.index if
pd.to_datetime(DTINI) <= t <= pd.to_datetime(DTEND)])
self.ts = ts.loc[relevant_time_period]
# find and store the rebalancing dates
baf = FHBacktestAncilliaryFunctions()
self.rebalance_dates = baf.resample_dates(relevant_time_period, rebalance)
# find weights
if static: # static weights case, so same weights every rebalance date
try:
cov = baf.get_cov_matrix_on_date(ts.dropna().index[-1], ts, h=cov_period,
cov_type='expanding', cov_window=cov_window, halflife=halflife)
static_weights = baf.static_weights(weighting_scheme, cov, vol_target=vol_target)
except: # fall back to equal weights if weighting_scheme parameters is not recognized
print('%s weighting scheme is not recognized, defaulting to static equal weights' % weighting_scheme)
weighting_scheme = 'EW'
static_weights = baf.static_weights(weighting_scheme)
self.weights = baf.expand_static_weights(self.rebalance_dates, static_weights)
else:
dynamic_weights = pd.DataFrame(index=self.rebalance_dates,columns=ts.columns)
for r in dynamic_weights.index:
cov = baf.get_cov_matrix_on_date(r, ts, cov_type=cov_type, h=cov_period,
cov_window=cov_window, halflife=halflife)
static_weights = baf.static_weights(weighting_scheme, cov, vol_target=vol_target)
dynamic_weights.loc[r] = static_weights.values
self.weights = dynamic_weights.copy()
# default to one if rescale_weights = True
rsw_string = 'to_one' if isinstance(rescale_weights, bool) and rescale_weights else rescale_weights
if rsw_string and static:
            # if rsw_string is truthy (rescale_weights was True or a non-empty string), this will be true
            # also, it only makes sense to re-scale static weights; dynamic weights are already rescaled
            if rescale_weights is True:  # only print this if a boolean True was passed
                print('type of re-scaling not given, rescaling to one')
self._rescale_weights(by=rsw_string, vol_target=vol_target, cov_type=cov_type,
h=cov_period, cov_window=cov_window, halflife=halflife)
def _rescale_weights(self, by='to_one', vol_target=0.1, h=21, cov_type='rolling', cov_window=756, halflife=60):
""""
This function transforms static weights in a dataframe of constant weights having dates_to_expand as index
Parameters
----------
by : method by which weights are supposed to be re-scaled
'to_one' : rescale weights add to one
'vol' : rescale weights to meet a certain volatility target
'notional' : weights are rescaled but keep the same notional as before
for other parameters see get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class
"""
r_weights = (self.ts.reindex(self.weights.index).notnull()*self.weights).copy()
if by == 'notional':
notional = self.weights.dropna().iloc[-1].sum()
k = notional / r_weights.sum(axis=1)
r_weights = r_weights.fillna(0).multiply(k,axis=0)
elif by == 'vol':
num_assets_in_reb_date = self.ts.reindex(self.weights.index).dropna(how='all').count(axis=1)
for r in num_assets_in_reb_date.index:
if num_assets_in_reb_date.diff(1).loc[r] != 0:
active_assets = r_weights.loc[r][r_weights.loc[r] != 0].index
cov = FHBacktestAncilliaryFunctions.get_cov_matrix_on_date(r, self.ts[active_assets],
h=h, cov_type=cov_type, cov_window=cov_window, halflife=halflife)
rescale_factor = vol_target / np.sqrt((r_weights.loc[r,active_assets] @ cov) @ r_weights.loc[r,active_assets])
r_weights.loc[r] = rescale_factor * r_weights.loc[r]
else:
r_weights.loc[r] = r_weights.loc[:r].iloc[-2].values
else:
if by != 'to_one':
                print('type of re-scaling not recognized, rescaling to one')
k = 1 / r_weights.sum(axis=1)
r_weights = r_weights.fillna(0).multiply(k,axis=0)
self.weights = r_weights.copy().fillna(method='ffill').dropna(how='all')
def run_backtest(self, backtest_name = 'backtest'):
""""
Runs the strategy, calculating the performance and the attributes backtest, pnl and holdings
The resulting single-column Pandas DataFrame with the backtest will be stored in the backtest attribute
with backtest_name as sole column name
"""
# TODO: incorporate transaction costs
# set up backtest series. Same calendar as the underlying time series and indexed to start at one
self.backtest = pd.Series(index=self.ts.index)
self.backtest.iloc[0] = 1
# set up pnl series. Same calendar as the underlying time series and indexed to start at zero pnl on day one
self.pnl = pd.Series(index=self.ts.index)
self.pnl.iloc[0] = 0
# take the first set of weights available and use those at the start of the backtest
if min(self.weights.index)>min(self.ts.index):
w0 = pd.DataFrame(columns=[min(self.ts.index)], index=self.weights.columns, data=self.weights.iloc[0].values)
self.weights = self.weights.append(w0.T).sort_index()
# set up the DataFrame that will store the quantities of each underlying held during the backtest
self.holdings = pd.DataFrame(index=self.ts.index,columns=self.ts.columns)
self.holdings.iloc[0] = self.weights.iloc[0] / self.ts.iloc[0] # first trade
# loop over days, running the strategy
for t, tm1 in zip(self.backtest.index[1:], self.backtest.index[:-1]):
# calculate pnl as q x change in price
prices_t = self.ts.loc[:t].iloc[-1]
previous_prices = self.ts.loc[:tm1].iloc[-1]
self.pnl[t] = (self.holdings.loc[tm1].copy() * (prices_t -previous_prices)).sum()
# acumulate the pnl in the backtest series
self.backtest[t] = self.backtest[tm1] + self.pnl[t]
# check if it is a rebalancing day, if so, recalculate the holdings based on new weights, i.e., rebalance
if t in self.weights.index:
self.holdings.loc[t] = self.backtest.loc[tm1]*self.weights.loc[t] / self.ts.loc[t]
else:
self.holdings.loc[t] = self.holdings.loc[tm1].copy()
self.backtest = self.backtest.astype(float).to_frame(backtest_name).copy()
return self.backtest
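def _example_long_only_backtest():
    # Hedged usage sketch added for illustration; this helper is not part of the original module.
    # It assumes numpy/pandas are imported at module level (as np / pd, as the classes above require)
    # and builds synthetic trackers, so the numbers it produces are meaningless beyond showing the API.
    dates = pd.bdate_range('2015-01-01', '2020-12-31')
    rng = np.random.default_rng(0)
    synthetic_levels = 100 * np.exp(np.cumsum(rng.normal(0, 0.01, (len(dates), 3)), axis=0))
    trackers = pd.DataFrame(synthetic_levels, index=dates, columns=['asset_a', 'asset_b', 'asset_c'])
    strategy = FHLongOnlyWeights(trackers, DTINI='2015-01-01', static=True,
                                 weighting_scheme='IVP', rebalance='M')
    return strategy.run_backtest(backtest_name='ivp_example')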
class FHSignalBasedWeights(object):
"""
Implements long-short portfolio strategies
Attributes
----------
underlyings : a list or index with the name of the underlyings of the strategy
ts : a Pandas DataFrame containing the indexed time series of returns for a set of underlying trackers
rebalance_dates : DatetimeIndex with the rebalancing dates of the strategy
weights : a Pandas DataFrame containing the time series of notional allocation (weights) on each underlying tracker
for each rebalancing date
    holdings : a Pandas DataFrame containing the time series of the quantity held
on each underlying tracker on all dates
traded_notional : a Pandas DataFrame containing the notional traded on each underlying on each date of the strategy
pnl : a Pandas Series containing the time series of the daily pnl of the strategy
backtest : a Pandas Series containing the time series of the indexed cumulative pnl of the strategy
Methods
----------
run_backtest : runs the strategy, calculating the performance and the attributes backtest, pnl and holdings
It also returns the backtest as a Pandas DataFrame
"""
def __init__(self, ts, signals, DTINI='1997-12-31', DTEND='today',
weighting_scheme = 'IVP', rebalance='M', vol_target = 0.1,
cov_type='rolling', cov_period=21, cov_window=756, halflife=60):
"""
This class implements long-short portfolio strategies.
Parameters
----------
ts : a Pandas DataFrame containing the indexed time series of returns for a set of trackers.
The time series do not need to start or end all at the same time and the code deals with missing data
signals : a Pandas DataFrame containing the time series of the signals used to go long or short each underlying
The time series do not need to start or end all at the same time and the code deals with missing data
DTINI : a string containing the initial date for the backtest (default is '1997-12-31')
DTEND : a string containing the end date for the backtest (default is 'today')
weighting_scheme : a string that defines the strategy weighting scheme to be used as argument on
the cross_sectional_weights_from_signals method in the FHBacktestAncilliaryFunctions class.
See FHBacktestAncilliaryFunctions.cross_sectional_weights_from_signals for different
weighting scheme options.
rebalance : a string, list or DatetimeIndex that defines the rebalancing frequency of the strategy.
The string is used as argument on the resample_dates method in the FHBacktestAncilliaryFunctions
class. See FHBacktestAncilliaryFunctions.resample_dates for rebalancing options
Month-end rebalancing is the default rebalancing scheme.
vol_target : a float used in some weighting schemes to set the overall volatility of the portfolio
        cov_type : is a string with the type of covariance calculation. See the cov_type parameter on the
get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class.
The default is 'rolling'
cov_period : an integer used to calculate the rolling returns that will be used in the covariance calculation.
See the h parameter on the get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class.
The default is 21 business days, so 1 month of rolling returns
cov_window : an integer used to determine how far back in history to calculate the rolling returns that
will be used in the covariance calculation. See the cov_window parameter on the
get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class
The default is 756 business days, so 3 years of data
        halflife : for cov_type equal to 'ewma' a halflife parameter may be specified. See the halflife parameter
on the get_cov_matrix_on_date method of the FHBacktestAncilliaryFunctions class.
The default is 60 bdays, about 3 months, if no parameter is specified
"""
assert isinstance(ts, pd.DataFrame), "input 'ts' must be a pandas DataFrame"
assert isinstance(signals, pd.DataFrame), "input 'signals' must be a pandas DataFrame"
# store the names of the underlyings
self.underlyings = pd.Index([x for x in ts.columns if x in signals.columns])
# fill na's and store time series data
ts = ts.copy().fillna(method='ffill').dropna(how='all')
ts.index = pd.DatetimeIndex(pd.to_datetime(ts.index))
t0 = max(signals.index.min(),pd.to_datetime(DTINI))
relevant_time_period = pd.DatetimeIndex([t for t in ts.index if t0 <= t <= pd.to_datetime(DTEND)])
self.ts = ts.loc[relevant_time_period,self.underlyings]
# find and store the rebalancing dates
baf = FHBacktestAncilliaryFunctions()
self.rebalance_dates = baf.resample_dates(relevant_time_period, rebalance)
# get weights according to given weighting scheme
dynamic_weights = pd.DataFrame(index=self.rebalance_dates, columns=self.underlyings)
for r in dynamic_weights.index:
if weighting_scheme in ['vol_target','ERC','IVP']:
cov = baf.get_cov_matrix_on_date(r, ts, h=cov_period, cov_type=cov_type,
cov_window=cov_window, halflife=halflife)
else:
cov = None
static_weights = baf.cross_sectional_weights_from_signals(signals.loc[r],
weighting_scheme=weighting_scheme,
cov=cov, vol_target=vol_target)
dynamic_weights.loc[r] = static_weights.values
self.weights = dynamic_weights.copy()
def run_backtest(self, backtest_name = 'backtest', holdings_costs_bps_pa = 0, rebalance_costs_bps = 0):
""""
Runs the strategy, calculating the performance and the attributes backtest, pnl and holdings
Parameters
----------
backtest_name : a string. The resulting single-column Pandas DataFrame with the backtest will be stored in the backtest attribute
with backtest_name as sole column name
holdings_costs_bps_pa : a Pandas Series with the cost per year in bps of holding 100% notional of each underlying
if a float or an integer is given, that number will be used for all underlyings
Default is zero holdings costs
rebalance_costs_bps : a Pandas Series with the cost of trading in/out of 100% notional of each underlying
if a float or an integer is given, that number will be used for all underlyings
                              Default is zero rebalancing costs
"""
# set up backtest series. Same calendar as the underlying time series and indexed to start at one
self.backtest = pd.Series(index=self.ts.index)
self.backtest.iloc[0] = 1
# set up pnl series. Same calendar as the underlying time series and indexed to start at zero pnl on day one
self.pnl = pd.Series(index=self.ts.index)
self.pnl.iloc[0] = 0
# take the first set of weights available and use those at the start of the backtest
if min(self.weights.index)>min(self.ts.index):
w0 = pd.DataFrame(columns=[min(self.ts.index)], index=self.weights.columns, data=self.weights.iloc[0].values)
self.weights = self.weights.append(w0.T).sort_index()
# set up the DataFrame that will store the quantities of each underlying held during the backtest
self.holdings = pd.DataFrame(index=self.ts.index,columns=self.ts.columns)
self.holdings.iloc[0] = self.weights.iloc[0] / self.ts.iloc[0]
# set up the DataFrame that will store the traded notionals of each underlying per day
self.traded_notional = | pd.DataFrame(index=self.ts.index,columns=self.ts.columns,data=0) | pandas.DataFrame |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from nntransfer.analysis.results.base import Analyzer
from nntransfer.analysis.plot import plot
class BiasTransferAnalyzer(Analyzer):
def generate_table(
self,
objective=("Test", "img_classification", "accuracy"),
last_n=0,
label_steps=False,
):
row_list = []
for desc, results in self.data.items():
if label_steps:
name_split = desc.name.split(" ")
name = " ".join(name_split[:-1])
labels = name_split[-1][1:-1].split(";")
else:
name, labels = (desc.name, None)
row = {"name": name}
levels = sorted(list(results.keys()))
if last_n:
levels = levels[(-1) * last_n :]
for level, tracker in results.items():
try:
if level in levels:
l = levels.index(level)
if labels:
l = labels[l]
row[l] = tracker.get_current_objective(objective)
except:
pass # no valid entry for this objective
row_list.append(row)
df = | pd.DataFrame(row_list) | pandas.DataFrame |
import json
import os
from collections import defaultdict
from typing import Dict
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy.spatial import KDTree, distance_matrix
from .constants import PIX_TO_M, MAX_OBJECT_LENGTH_M
__all__ = ["drop_low_confidence_preds", "official_metric_scoring", "score_thresholded"]
def official_metric_scoring_per_scene(pred, gt, shore_root, objectness_threshold, is_vessel_threshold, is_fishing_threshold):
scores_per_scene = defaultdict(list)
for scene_id in gt.scene_id.unique():
scores = official_metric_scoring(
pred[pred.scene_id == scene_id].reset_index(drop=True), gt[gt.scene_id == scene_id].reset_index(drop=True), shore_root
)
scores["objectness_threshold"] = float(objectness_threshold)
scores_for_global_thresholds = scores[
(scores.objectness_threshold == objectness_threshold)
& (scores.is_vessel_threshold == is_vessel_threshold)
& (scores.is_fishing_threshold == is_fishing_threshold)
]
local_thresholds_score = scores.loc[scores["aggregate"].idxmax(), "aggregate"]
local_is_vessel_threshold = scores.loc[scores["aggregate"].idxmax(), "is_vessel_threshold"]
local_is_fishing_threshold = scores.loc[scores["aggregate"].idxmax(), "is_fishing_threshold"]
scores_per_scene["scene_id"].append(scene_id)
scores_per_scene["global_thresholds_score"].append(scores_for_global_thresholds["aggregate"].values[0])
scores_per_scene["global_is_vessel_threshold"].append(is_vessel_threshold)
scores_per_scene["global_is_fishing_threshold"].append(is_fishing_threshold)
scores_per_scene["local_thresholds_score"].append(local_thresholds_score)
scores_per_scene["local_is_vessel_threshold"].append(local_is_vessel_threshold)
scores_per_scene["local_is_fishing_threshold"].append(local_is_fishing_threshold)
return pd.DataFrame.from_dict(scores_per_scene)
def official_metric_scoring(pred, gt, shore_root) -> pd.DataFrame:
inference = drop_low_confidence_preds(pred, gt, distance_tolerance=200, costly_dist=True)
ground_truth = gt[gt["confidence"].isin(["HIGH", "MEDIUM"])].reset_index(drop=True)
scores = score_multithreshold(inference, ground_truth, shore_root, distance_tolerance=200, shore_tolerance=2, costly_dist=True)
return scores
def drop_low_confidence_preds(pred, gt, distance_tolerance=200, costly_dist=False):
"""
    Matches detections in a predictions dataframe to a ground truth dataframe and isolates the low confidence matches
    Args:
        pred (pd.DataFrame): contains inference results, possibly covering
            multiple scenes
        gt (pd.DataFrame): contains ground truth labels for the same
            scenes
distance_tolerance (int, optional): Maximum distance
for valid detection. Defaults to 200.
costly_dist (bool): whether to assign 9999999 to entries in the
distance metrics greater than distance_tolerance; defaults to False
Returns:
df_out (pd.DataFrame): preds dataframe without the low confidence matches
"""
low_inds = []
# For each scene, obtain the tp, fp, and fn indices for maritime
# object detection in the *global* pred and gt dataframes
for scene_id in gt["scene_id"].unique():
pred_sc = pred[pred["scene_id"] == scene_id]
gt_sc = gt[gt["scene_id"] == scene_id]
low_inds_scene = match_low_confidence_preds(pred_sc, gt_sc, distance_tolerance=distance_tolerance, costly_dist=costly_dist)
low_inds += low_inds_scene
# Check matched pairs came from "LOW" labels
for pair in low_inds:
assert gt.iloc[pair["gt_idx"]]["confidence"] == "LOW", f"Index {pair['gt_idx']} is {gt.iloc[pair['gt_idx']]['confidence']}"
low_pred_inds = [a["pred_idx"] for a in low_inds]
df_out = pred.drop(index=low_pred_inds)
df_out = df_out.reset_index()
return df_out
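# Hedged usage sketch (added for illustration; not part of the original module): 'pred_df' and
# 'gt_df' are assumed DataFrames carrying the scene_id, detect_scene_row, detect_scene_column and
# confidence columns used above.
#   filtered_preds = drop_low_confidence_preds(pred_df, gt_df, distance_tolerance=200, costly_dist=True)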
def match_low_confidence_preds(preds, gt, distance_tolerance=200, costly_dist=False):
"""
    Matches detections in a predictions dataframe to a ground truth dataframe and isolates the low confidence matches
Args:
preds (pd.DataFrame): contains inference results for a
single scene
gt (pd.DataFrame): contains ground truth labels for a single
scene
distance_tolerance (int, optional): Maximum distance
for valid detection. Defaults to 200.
costly_dist (bool): whether to assign 9999999 to entries in the
distance metrics greater than distance_tolerance; defaults to False
Returns:
        low_inds (list of dict): each entry holds a 'pred_idx'/'gt_idx' pair for a prediction that
            (1) is a valid detection match and (2) corresponds to a LOW-confidence ground-truth label
"""
# Getting pixel-level predicted and ground-truth detections
pred_array = np.array(list(zip(preds["detect_scene_row"], preds["detect_scene_column"]))).reshape((-1, 2))
gt_array = np.array(list(zip(gt["detect_scene_row"], gt["detect_scene_column"]))).reshape((-1, 2))
# Getting a list of index with LOW in the ground truth dataframe
low_gt_inds = list(gt[gt["confidence"] == "LOW"].index)
# Building distance matrix using Euclidean distance pixel space
# multiplied by the UTM resolution (10 m per pixel)
dist_mat = distance_matrix(pred_array, gt_array, p=2) * PIX_TO_M
if costly_dist:
dist_mat[dist_mat > distance_tolerance] = 9999999 * PIX_TO_M
# Using Hungarian matching algorithm to assign lowest-cost gt-pred pairs
rows, cols = linear_sum_assignment(dist_mat)
low_inds = [
{"pred_idx": preds.index[rows[ii]], "gt_idx": gt.index[cols[ii]]}
for ii in range(len(rows))
if (dist_mat[rows[ii], cols[ii]] < distance_tolerance) and (gt.index[cols[ii]] in low_gt_inds)
]
return low_inds
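def _example_hungarian_matching():
    # Hedged illustration added for clarity (not part of the original module): the same
    # distance-matrix + Hungarian-assignment pattern used above, on tiny synthetic points.
    pred_array = np.array([[0, 0], [10, 10]])
    gt_array = np.array([[1, 1], [500, 500]])
    dist_mat = distance_matrix(pred_array, gt_array, p=2) * PIX_TO_M
    rows, cols = linear_sum_assignment(dist_mat)
    # each (row, col) pair is a lowest-total-cost prediction/ground-truth pairing
    return list(zip(rows, cols))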
def get_shoreline_shoreline_contours(shoreline_root, scene_id) -> np.ndarray:
shoreline_places = [
f"{shoreline_root}/{scene_id}_shoreline.npy",
f"{shoreline_root}/train/{scene_id}_shoreline.npy",
f"{shoreline_root}/validation/{scene_id}_shoreline.npy",
]
shoreline_contours = None
for shoreline_path in shoreline_places:
if os.path.isfile(shoreline_path):
shoreline_contours = np.load(shoreline_path, allow_pickle=True)
break
if shoreline_contours is None:
raise RuntimeError("Could not locate shoreline_contours path")
if len(shoreline_contours):
contour_points = np.vstack(shoreline_contours)
return contour_points.reshape((-1, 2))
else:
return np.array([]).reshape((-1, 2))
def get_shore_preds(df, shoreline_root, scene_id, shore_tolerance_km):
"""
Getting detections that are close to the shoreline
Args:
df (pd.DataFrame): dataframe containing detections
shoreline_root (str): path to shoreline contour files
scene_id (str): scene_id
shore_tolerance_km (float): "close to shore" tolerance in km
Returns:
df_close (pd.DataFrame): subset of df containing only detections close to shore
"""
# Loading shoreline contours for distance-to-shore calculation
shoreline_contours = get_shoreline_shoreline_contours(shoreline_root, scene_id)
# If there are no shorelines in the scene
if len(shoreline_contours) == 0 or len(df) == 0:
return | pd.DataFrame() | pandas.DataFrame |
"""
Functions useful in finance related applications
"""
import numpy as np
import pandas as pd
import datetime
import dateutil.relativedelta as relativedelta
def project_to_first(dt):
return datetime.datetime(dt.year, dt.month, 1)
def multiple_returns_from_levels_vec(df_in, period=1):
    df_out = (df_in - df_in.shift(period)) / df_in.shift(period)
return df_out
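def _example_multiple_returns_from_levels_vec():
    # Hedged usage sketch added for illustration; not part of the original module.
    # 1-period simple returns from a tiny level series: NaN, 1.0%, about -1.98%, about 4.04%.
    levels = pd.Series([100.0, 101.0, 99.0, 103.0])
    return multiple_returns_from_levels_vec(levels, period=1)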
def df_restrict_dates(df_in, start_date, end_date, multi_index=False, date_name='date'):
"""
restrict input dataframe to certain date range
boundaries are inclusive
index must be in date format
:param df_in: pandas data frame, index must be in datetime format; can deal with multi-index now as well
:param start_date: datetime.datetime (date or certain string formats might also work)
:param end_date: datetime.datetime (date or certain string formats might also work)
:return: reduced dateframe
"""
df_out = df_in.copy()
if multi_index:
mask = (df_out.index.get_level_values(date_name) >= start_date) & \
(df_out.index.get_level_values(date_name) <= end_date)
else:
mask = (df_out.index >= start_date) & (df_out.index <= end_date)
return df_out[mask]
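def _example_df_restrict_dates():
    # Hedged usage sketch added for illustration; not part of the original module.
    # Keeps only the 2020 rows of a small synthetic daily frame.
    idx = pd.date_range('2019-01-01', '2021-12-31', freq='D')
    df = pd.DataFrame({'level': np.arange(len(idx), dtype=float)}, index=idx)
    return df_restrict_dates(df, datetime.datetime(2020, 1, 1), datetime.datetime(2020, 12, 31))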
def levels_from_returns(df_in, infield='return', outfield='level', starting_level=1, frequency='daily',
initial_date=None):
assert frequency in ['daily', 'monthly', 'quarterly'], 'not implemented'
start_date = df_in.index.min()
df_out = df_in[[infield]].copy()
if initial_date is None:
if frequency == 'daily':
initial_date = start_date + relativedelta.relativedelta(days=-1)
if frequency == 'monthly':
initial_date = start_date + relativedelta.relativedelta(months=-1)
if frequency == 'quarterly':
initial_date = start_date + relativedelta.relativedelta(months=-3)
df_out.loc[initial_date] = starting_level
df_out.sort_index(ascending=True, inplace=True)
df_out[outfield + '_temp'] = compute_levels(starting_level, df_in[infield].values)
df_out.drop(infield, axis=1, inplace=True)
df_out.rename(columns={outfield + '_temp': outfield}, inplace=True)
return df_out
def monthly_returns(df_in, field='Close', out_name='monthly_return', day_of_month='last'):
assert day_of_month in ['first', 'last'], 'not implemented'
start_date = df_in.index.min()
end_date = df_in.index.max()
shift_start_date = start_date + relativedelta.relativedelta(months=1)
first_date_returns = datetime.datetime(shift_start_date.year, shift_start_date.month, 1)
last_date_returns = datetime.datetime(end_date.year, end_date.month, 1)
date = first_date_returns
l_monthly_returns = []
l_dates = []
while date <= last_date_returns:
this_year = date.year
this_month = date.month
final_day = find_day_in_month(df_in.index, this_year, this_month, which=day_of_month)
mask = df_in.index == final_day
final_val = df_in[mask][field].iloc[0]
prev_date = date + relativedelta.relativedelta(months=-1)
prev_year = prev_date.year
prev_month = prev_date.month
initial_day = find_day_in_month(df_in.index, prev_year, prev_month, which=day_of_month)
mask = df_in.index == initial_day
prev_val = df_in[mask][field].iloc[0]
#print(prev_initial_day, prev_val)
if abs(prev_val) > 0.0:
monthly_return = (final_val - prev_val) / prev_val
else:
monthly_return = np.nan
l_monthly_returns.append(monthly_return)
l_dates.append(date)
date += relativedelta.relativedelta(months=1)
df_out = | pd.DataFrame({out_name: l_monthly_returns}, index=l_dates) | pandas.DataFrame |
#!/usr/bin/env python3
"""
Tests the integration between:
- grand_trade_auto.model.model_meta
- grand_trade_auto.orm.orm_meta
While unit tests already exercise this integration to some degree, this module
does so more exhaustively.  Those unit tests mostly use integrative approaches,
to keep mock complexity low and keep the tests practically useful, but they aim
to minimize how much of the integration is invoked.
Per [pytest](https://docs.pytest.org/en/reorganize-docs/new-docs/user/naming_conventions.html), all test files, classes, and methods will be prefaced with `test_/Test` to comply
all tiles, classes, and methods will be prefaced with `test_/Test` to comply
with auto-discovery (others may exist, but will not be part of test suite
directly).
Module Attributes:
logger (Logger): Logger for this module (used for testing).
(C) Copyright 2022 <NAME>. All Rights Reserved Worldwide.
"""
#pylint: disable=protected-access # Allow for purpose of testing those elements
#pylint: disable=use-implicit-booleaness-not-comparison
# +-> want to specifically check type in most tests -- `None` is a fail
import copy
import logging
import pandas as pd
import psycopg2.errors
import pytest
from grand_trade_auto.model import model_meta
from grand_trade_auto.orm import orm_meta
logger = logging.getLogger(__name__)
class ModelTest(model_meta.Model):
"""
A test model to use for testing within this module.
"""
_table_name = 'test_model_meta'
# NOTE: Order of attributes swapped to trick dupe code pylint check
_read_only_columns = (
'col_auto_ro',
)
_columns = (
'id',
'col_1',
'col_2',
'col_auto_ro',
)
# Column Attributes -- MUST match _columns!
# id defined in super
col_1 = None
col_2 = None
col_auto_ro = None
# End of Column Attributes
def __copy__(self):
"""
Return an effective shallow copy for these testing purposes.
"""
shallow_copy = ModelTest(self._orm)
for attr in ['id', 'col_1', 'col_2', 'col_auto_ro']:
setattr(shallow_copy, attr, getattr(self, attr))
return shallow_copy
class OrmTest(orm_meta.Orm):
"""
A barebones Orm that can be used for most tests.
Instance Attributes:
_mock_db_results ([]): A list of objects that would be in the database if
there were a db. Meant to store results for add() so they can be
checked later, etc.
"""
def __init__(self, db):
"""
Add the `_mock_db_results` instance attribute.
"""
super().__init__(db)
self._mock_db_results = []
# NOTE: Tables before Enums below to trick dupe code pylint check
def _create_schema_table_company(self):
"""
Not needed / will not be used.
"""
def _create_schema_table_datafeed_src(self):
"""
Not needed / will not be used.
"""
def _create_schema_table_exchange(self):
"""
Not needed / will not be used.
"""
def _create_schema_table_security(self):
"""
Not needed / will not be used.
"""
def _create_schema_table_security_price(self):
"""
Not needed / will not be used.
"""
def _create_schema_table_stock_adjustment(self):
"""
Not needed / will not be used.
"""
def _create_schema_enum_currency(self):
"""
Not needed / will not be used.
"""
def _create_schema_enum_market(self):
"""
Not needed / will not be used.
"""
def _create_schema_enum_price_frequency(self):
"""
Not needed / will not be used.
"""
def add(self, model_cls, data, **kwargs):
"""
Fake adding something to mock results, and check cols. Expected to
check afterwards.
Raises:
(psycopg2.errors.GeneratedAlways): Raised as a simulated error if
attempting to update a read only column. This should be triggered
and tested as part of a test.
[Pass through expected]
"""
OrmTest._validate_cols(data.keys(), model_cls)
if any(c in data for c in model_cls._read_only_columns):
raise psycopg2.errors.GeneratedAlways( # pylint: disable=no-member
'...can only be updated to DEFAULT')
res = {
'model': model_cls(self, data),
'extra_args': kwargs,
}
self._mock_db_results.append(res)
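    # Hedged illustration (added for clarity; not part of the original tests): with some database
    # stub 'db' from the fixtures, add() appends to _mock_db_results for writable columns and
    # simulates the database error for read-only ones, e.g.
    #   orm = OrmTest(db)
    #   orm.add(ModelTest, {'col_1': 1})          # stored in orm._mock_db_results
    #   orm.add(ModelTest, {'col_auto_ro': 5})    # raises psycopg2.errors.GeneratedAlways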
def update(self, model_cls, data, where, **kwargs):
"""
Fake updating something in mock results, and check cols. Expected to
have existing data. Limited 'where' clause support.
Raises:
(psycopg2.errors.GeneratedAlways): Raised as a simulated error if
attempting to update a read only column. This should be triggered
and tested as part of a test.
(ValueError): Raised if an unsupported LogicOp is provided. This has
much more limited support for LogicOps to limit how much needs to be
implemented here. If this is raised, the test should be redesigned
to avoid it rather than catching it.
[Pass through expected]
"""
OrmTest._validate_cols(data.keys(), model_cls)
if where[1] is not model_meta.LogicOp.EQUALS:
raise ValueError('Test Error: Provided LogicOp not supported')
if any(c in data for c in model_cls._read_only_columns):
raise psycopg2.errors.GeneratedAlways( # pylint: disable=no-member
'...can only be updated to DEFAULT')
for res in self._mock_db_results:
if getattr(res['model'], where[0]) == where[2]:
for k, v in data.items():
setattr(res['model'], k, v)
# Intentionally override extra_args to be able to test
res['extra_args'] = kwargs
def delete(self, model_cls, where, really_delete_all=False, **kwargs):
"""
Fake deleting something in mock results, and check cols. Expected to
have existing data. Limited 'where' clause support.
Raises:
(ValueError):
[with respect to LogicOp] Raised if an unsupported LogicOp is
provided. This has much more limited support for LogicOps to
limit how much needs to be implemented here. If this is raised,
the test should be redesigned to avoid it rather than catching it.
[with respect to really_delete_all] Raised as a simulated error if
the where clause is empty but `really_delete_all` was not set to
True. This should be triggered and tested as part of a test.
"""
if where and where[1] is not model_meta.LogicOp.EQUALS:
raise ValueError('Test Error: Provided LogicOp not supported')
if not where and really_delete_all:
self._mock_db_results = []
return
if not where:
raise ValueError('Need to confirm w/ really_delete_all')
for res in self._mock_db_results:
if getattr(res['model'], where[0]) == where[2]:
del res['model']
# Intentionally override extra_args to be able to test
res['extra_args'] = kwargs
def query(self, model_cls, return_as, columns_to_return=None,
where=None, limit=None, order=None, **kwargs):
"""
Fake querying something from mock results, and check cols. Expected to
have existing data. Limited 'where' and 'order' clause support.
Raises:
(psycopg2.errors.GeneratedAlways): Raised as a simulated error if
attempting to update a read only column. This should be triggered
and tested as part of a test.
(ValueError): Raised if an unsupported LogicOp, SortOrder, or ReturnAs
is provided. This has much more limited support for LogicOps,
SortOrders, and ReturnAs options to limit how much needs to be
implemented here. If this is raised, the test should be redesigned
to avoid it rather than catching it.
[Pass through expected]
"""
#pylint: disable=too-many-branches
if columns_to_return is not None:
OrmTest._validate_cols(columns_to_return, model_cls)
if where and where[1] is not model_meta.LogicOp.EQUALS:
raise ValueError('Test Error: Provided LogicOp not supported')
if order and order[1] is not model_meta.SortOrder.DESC:
raise ValueError('Test Error: Provided SortOrder not supported')
cols_to_omit = []
if columns_to_return:
for col in ModelTest._columns:
if col not in columns_to_return:
cols_to_omit.append(col)
results = []
for res in self._mock_db_results:
if not where or getattr(res['model'], where[0]) == where[2]:
# Intentionally override extra_args to be able to test
res_copy = {
'model': copy.copy(res['model']),
'extra_args': kwargs,
}
for col in cols_to_omit:
setattr(res_copy['model'], col, None)
results.append(res_copy)
if order:
# Hard-coded to only support DESC, hence reverse is True
results.sort(key=lambda mdl: getattr(mdl['model'], order[0]),
reverse=True)
if limit is not None and limit < len(results):
results = results[:limit]
if model_meta.ReturnAs(return_as) is model_meta.ReturnAs.MODEL:
return [r['model'] for r in results]
if model_meta.ReturnAs(return_as) is model_meta.ReturnAs.PANDAS:
# Flatten 'model' level of dict for pd.df import
mdl_cols = columns_to_return or ModelTest._columns
for res in results:
for col in mdl_cols:
res[col] = getattr(res['model'], col)
del res['model']
return | pd.DataFrame(results) | pandas.DataFrame |
# Import standard python libraries.
import pandas as pd
import numpy as np
import pathlib
import warnings
import sys
# Import the functions used throughout this project from the function dictionary library file
fileDir = pathlib.Path(__file__).parents[2]
code_library_folder = fileDir / 'Code' / 'function_dictionary_library'
sys.path.append(str(code_library_folder))
from coal_data_processing_functions import state_abbreviations, generic_coal_rank, lower_case_data_keys
from statistical_functions import ecdf, weighted_ecdf
from statistics import mean
def weighted_coal_ecdf(coal):
# Read in (1) COALQUAL Data (2) and the amount of coal mining done in each county. We use skipfooter to not read in the
# search criteria rows.
coalqual_filename = fileDir / 'Data' / 'COALQUAL Data' / 'CQ_upper_level.csv'
COALQUAL = pd.read_csv(coalqual_filename, header=0,
names=['Sample_ID', 'State', 'County', 'Province', 'Region', 'Field', 'Formation', 'Bed',
'Apparent_Rank', 'Sulfur', 'Heat', 'Arsenic', 'Boron', 'Bromine', 'Chlorides',
'Mercury',
'Lead', 'Selenium'], usecols=[0, 1, 2, 5, 6, 7, 9, 11, 28, 84, 87, 147, 151, 159, 165,
191, 219, 239])
mining_volume_filename = fileDir / 'Intermediate' / 'Coal Mining By Counties.csv'
Mining_Volume = pd.read_csv(mining_volume_filename, header=0, names=['Coal_Sales', 'FIPS_Code_State',
'County_Name_State_Normal_Capitalization'],
usecols=[1, 2, 8])
# Drop COALQUAL anthracite and samples with blank apparent rank.
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Anthracite']
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Semianthracite']
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Rock']
COALQUAL = COALQUAL.dropna(subset=['Apparent_Rank'])
# Classify apparent ranks into broad categories.
COALQUAL['Rank'] = generic_coal_rank(COALQUAL.Apparent_Rank)
# Process the columns that will serve as keys for the data merging.
COALQUAL['State_Abbreviation'] = state_abbreviations(COALQUAL.State)
County_Name_State_Normal_Capitalization = COALQUAL['County'] + ' County, ' + COALQUAL['State_Abbreviation']
COALQUAL['County_Name_State'] = lower_case_data_keys(County_Name_State_Normal_Capitalization)
Mining_Volume['County_Name_State'] = lower_case_data_keys(Mining_Volume['County_Name_State_Normal_Capitalization'])
# mask = pd.Series(np.isfinite(COALQUAL['Chlorides']))
COALQUAL_all_samples_Cl = COALQUAL.loc[pd.Series(np.isfinite(COALQUAL['Chlorides']))]
COALQUAL_all_samples_Br = COALQUAL.loc[pd.Series(np.isfinite(COALQUAL['Bromine']))]
COALQUAL_all_samples_Cl = COALQUAL_all_samples_Cl.groupby(['County_Name_State']).mean()
COALQUAL_all_samples_Cl['County_Name_State'] = COALQUAL_all_samples_Cl.index
COALQUAL_all_samples_Cl = pd.merge(COALQUAL_all_samples_Cl, Mining_Volume, on='County_Name_State')
COALQUAL_all_samples_Br = COALQUAL_all_samples_Br.groupby(['County_Name_State']).mean()
COALQUAL_all_samples_Br['County_Name_State'] = COALQUAL_all_samples_Br.index
COALQUAL_all_samples_Br = pd.merge(COALQUAL_all_samples_Br, Mining_Volume, on='County_Name_State')
qe_Cl_All, pe_Cl_All = weighted_ecdf(COALQUAL_all_samples_Cl['Chlorides'], COALQUAL_all_samples_Cl['Coal_Sales'])
qe_Br_All, pe_Br_All = weighted_ecdf(COALQUAL_all_samples_Br['Bromine'], COALQUAL_all_samples_Br['Coal_Sales'])
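    # Note added for clarity (an assumption, not from the original script): weighted_ecdf comes from
    # the project's statistical_functions library and is presumed to return the sorted sample values
    # (qe_*) together with their coal-sales-weighted cumulative probabilities (pe_*), i.e. a
    # production-weighted empirical CDF of each trace element.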
# For Appalachian Low Sulfur Coal
if coal == 'Appalachian Low Sulfur':
COALQUAL = COALQUAL[
(COALQUAL['Region'] == 'SOUTHERN APPALACHIAN') | (COALQUAL['Region'] == 'CENTRAL APPALACHIAN')
| (COALQUAL['Region'] == 'NORTHERN APPALACHIAN')]
# USGS Circular 891 defines "low sulfur coal" as less than 1% total sulfur (https://pubs.usgs.gov/circ/c891/glossary.htm).
# This is identical to the standard used by the EIA.
COALQUAL = COALQUAL[COALQUAL['Sulfur'] < 1]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
#Chlorides = [x for x in COALQUAL['Chlorides'] if x != '']
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
gross_heat_rate = 8188 # Btu/kWh
FGD_water_treatment = 2.14e-4 # m^3/kWh
# For Appalachian Medium Sulfur Coal
elif coal == 'Appalachian Med Sulfur':
COALQUAL = COALQUAL[
(COALQUAL['Region'] == 'SOUTHERN APPALACHIAN') | (COALQUAL['Region'] == 'CENTRAL APPALACHIAN') | (
COALQUAL['Region'] == 'NORTHERN APPALACHIAN')]
COALQUAL = COALQUAL[(COALQUAL['Sulfur'] > 1) & (COALQUAL['Sulfur'] < 3)]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
gross_heat_rate = 8210 # Btu/kWh
FGD_water_treatment = 2.20e-4 # m^3/kWh
# For Beulah-Zap Bed Coal
elif coal == 'Beulah-Zap':
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'BEULAH-ZAP')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
qe_Br = qe_Br_All
pe_Br = pe_Br_All
gross_heat_rate = 8680 # Btu/kWh
FGD_water_treatment = 2.36e-4 # m^3/kWh
# For Illinois #6 Coal
elif coal == 'Illinois #6':
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'HERRIN NO 6')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
    COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
from os.path import join
import numpy as np
import streamlit as st
import pandas as pd
import datetime
import plotly.express as px
import plotly.graph_objects as go
import requests
from streamlit import caching
st.set_page_config(page_title="Covid Dashboard", page_icon="🕸", layout='wide', initial_sidebar_state='expanded')
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
pd.options.mode.chained_assignment=None
def new_cases_global(data,days=False,column_name=None):
if not days:
days=len(data)
orginal_data_column = list(data.columns)[-days:]
edited_data=data[orginal_data_column].T
last=list(data.columns[-days:])
new_case_list=[]
for i in range(len(orginal_data_column)):
if last[0]==orginal_data_column[i]:
add_date=orginal_data_column[i-1]
for i in range(len(last)):
if i == 0:
columns = list(data.columns)
for j in range(len(columns)):
if columns[j] == last[0]:
new_case = int(abs(sum(data[columns[j-1]]) - sum(data[last[0]])))
new_case_list.append({column_name:new_case})
else:
new_case = int(abs(sum(edited_data.loc[last[i-1]])-sum(edited_data.loc[last[i]])))
new_case_list.append({column_name:new_case})
new_cases_data=pd.DataFrame(new_case_list,index=last)
new_cases_data.index= pd.to_datetime(new_cases_data.index)
return new_cases_data
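# Hedged usage sketch (illustrative only): the tiny frame below mimics the JHU time-series
# layout assumed by new_cases_global, i.e. one column per date holding cumulative counts per
# region; the column name 'new_cases' is arbitrary.
def _example_new_cases_global():
    demo = pd.DataFrame({'1/1/21': [10, 5], '1/2/21': [12, 6], '1/3/21': [15, 8]})
    # Daily deltas for the last two dates: abs(18-15)=3 and abs(23-18)=5
    return new_cases_global(demo, days=2, column_name='new_cases')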
option_1,option_2,option_3 = "Covid Global dashboard","Covid Vaccination (India)","Covid India dashboard"
dashboard_options = st.sidebar.selectbox("Select a dashboard",(option_1,option_2,option_3))
if dashboard_options == option_1:
st.title(option_1)
column_1 , column_2 , column_3 , column_4 = st.beta_columns((2, 1, 1, 1))
col1 , col2 ,col3 = st.beta_columns(3)
st.sidebar.write('Developed with ❤ by [<NAME>](https://www.Abhay.dev/)')
#st.sidebar.write('Data is obtained from [JHU](https://github.com/CSSEGISandData/COVID-19)')
def format_as_indian(value):
input_list = list(str(value))
if len(input_list) <= 1:
formatted_input = value
else:
first_number = input_list.pop(0)
last_number = input_list.pop()
formatted_input = first_number + ((''.join(l + ',' * (n % 2 == 1) for n, l in enumerate(reversed(input_list)))[::-1] + last_number))
if len(input_list) % 2 == 0:
                formatted_input = formatted_input.lstrip(',')
return formatted_input
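    # Hedged examples (illustrative only): the helper above groups digits in the Indian
    # numbering style, e.g. format_as_indian(1234567) yields '12,34,567' and
    # format_as_indian(1000) yields '1,000'.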
@st.cache
def fetch_data(url):
try:
data = pd.read_csv(url)
columns = []
for i in list(data.columns):
if i.lower() == "long" or i.lower() == "long_":
columns.append("lon")
else:
columns.append(i.lower())
data.columns=columns
return data
except:
return 0
@st.cache
def get_data(url):
        data = pd.read_csv(url)
# %% [markdown]
# This notebook is a VSCode notebook version of:
# https://www.kaggle.com/georsara1/lightgbm-all-tables-included-0-778
#
# You could find the data from:
# https://www.kaggle.com/c/home-credit-default-risk/data
## All the data files should be in the same directory with this file!
#%% Importing necessary libraries
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.feature_selection import VarianceThreshold
#%%
# Importing data
data = pd.read_csv('application_train.csv')
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
"下载数据"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
"按时间划分验证集"
#转化测试集时间戳为标准时间
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
    # Convert training-set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
    # Process the item_category_list attribute of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
    # Process the item_category_list attribute of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
    # Rank of the matched category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
    # Count categories shared between item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    # Count of categories not shared (note: this overwrites the shared-count feature above)
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
del trainSet['predict_category']; del testSet['predict_category']
"划分数据集"
#测试集 23-24号特征提取,25号打标
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
    # Validation set: features from Sep 22-23, labels on Sep 24
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
    # Training sets: features from Sep 21-22 with labels on Sep 23; 20-21 labeled on 22; 19-20 labeled on 21; 18-19 labeled on 20
    # Label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
    # Feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
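# Hedged usage sketch (illustrative only): assumes the two round1 .txt files are present in
# the working directory; shows the intended load -> time-based split flow for the functions above.
def _example_split_data():
    trainSet, testSet = loadData()
    (test, testFeat, validate, validateFeat,
     train1, trainFeat1, train2, trainFeat2,
     train3, trainFeat3, train4, trainFeat4) = splitData(trainSet, testSet)
    return validate.shape, validateFeat.shape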
def modelXgb(train,test):
"xgb模型"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
    # Based on the Pearson correlation coefficient, drop attributes with correlation below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
    # Model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
    # Train
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
    # Predict
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
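# Hedged usage sketch (illustrative only): `train` must already carry the engineered columns
# produced by merge_feat below plus the 'is_trade' label, and `test` the same features plus
# 'instance_id'; the returned frame holds 'instance_id' and 'predicted_score'.
def _example_model_xgb(train, test):
    preds = modelXgb(train, test)
    return preds.sort_values('predicted_score', ascending=False).head()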
def get_item_feat(data,dataFeat):
"item的特征提取"
result = pd.DataFrame(dataFeat['item_id'])
result = result.drop_duplicates(['item_id'],keep='first')
"1.统计item出现次数"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"2.统计item历史被购买的次数"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"3.统计item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
result['item_buy_ratio'] = buy_ratio
"4.统计item历史未被够买的次数"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
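# Hedged toy example (illustrative only) of the tally pattern above; the first positional
# argument is unused by get_item_feat, so None is passed.
def _example_item_feat():
    toy = pd.DataFrame({'item_id': [1, 1, 2], 'is_trade': [0, 1, 1]})
    # item 1: count=2, buy=1, ratio=0.5; item 2: count=1, buy=1, ratio=1.0
    return get_item_feat(None, toy.copy())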
def get_user_feat(data,dataFeat):
"user的特征提取"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
def get_context_feat(data,dataFeat):
"context的特征提取"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
"shop的特征提取"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"2.统计shop历史被购买的次数"
dataFeat['shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_buy_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"3.统计shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_buy_count,result.shop_count))
result['shop_buy_ratio'] = buy_ratio
"4.统计shop历史未被够买的次数"
result['shop_not_buy_count'] = result['shop_count'] - result['shop_buy_count']
return result
def get_timestamp_feat(data,dataFeat):
"context_timestamp的特征提取"
result = pd.DataFrame(dataFeat['context_timestamp'])
result = result.drop_duplicates(['context_timestamp'],keep='first')
"1.统计context_timestamp出现次数"
dataFeat['context_timestamp_count'] = dataFeat['context_timestamp']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['context_timestamp_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"2.统计context_timestamp历史被购买的次数"
dataFeat['context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_timestamp_buy_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"3.统计context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_timestamp_buy_count,result.context_timestamp_count))
result['context_timestamp_buy_ratio'] = buy_ratio
"4.统计context_timestamp历史未被够买的次数"
result['context_timestamp_not_buy_count'] = result['context_timestamp_count'] - result['context_timestamp_buy_count']
return result
def get_item_brand_feat(data,dataFeat):
"item_brand的特征提取"
result = pd.DataFrame(dataFeat['item_brand_id'])
result = result.drop_duplicates(['item_brand_id'],keep='first')
"1.统计item_brand出现次数"
dataFeat['item_brand_count'] = dataFeat['item_brand_id']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_count',aggfunc='count').reset_index()
del dataFeat['item_brand_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"2.统计item_brand历史被购买的次数"
dataFeat['item_brand_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_brand_buy_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"3.统计item_brand转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_brand_buy_count,result.item_brand_count))
result['item_brand_buy_ratio'] = buy_ratio
"4.统计item_brand历史未被够买的次数"
result['item_brand_not_buy_count'] = result['item_brand_count'] - result['item_brand_buy_count']
return result
def get_item_city_feat(data,dataFeat):
"item_city的特征提取"
result = pd.DataFrame(dataFeat['item_city_id'])
result = result.drop_duplicates(['item_city_id'],keep='first')
"1.统计item_city出现次数"
dataFeat['item_city_count'] = dataFeat['item_city_id']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_count',aggfunc='count').reset_index()
del dataFeat['item_city_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"2.统计item_city历史被购买的次数"
dataFeat['item_city_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_city_buy_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"3.统计item_city转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_city_buy_count,result.item_city_count))
result['item_city_buy_ratio'] = buy_ratio
"4.统计item_city历史未被够买的次数"
result['item_city_not_buy_count'] = result['item_city_count'] - result['item_city_buy_count']
return result
def get_user_gender_feat(data,dataFeat):
"user_gender的特征提取"
result = pd.DataFrame(dataFeat['user_gender_id'])
result = result.drop_duplicates(['user_gender_id'],keep='first')
"1.统计user_gender出现次数"
dataFeat['user_gender_count'] = dataFeat['user_gender_id']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_count',aggfunc='count').reset_index()
del dataFeat['user_gender_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"2.统计user_gender历史被购买的次数"
dataFeat['user_gender_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_gender_buy_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"3.统计user_gender转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_gender_buy_count,result.user_gender_count))
result['user_gender_buy_ratio'] = buy_ratio
"4.统计user_gender历史未被够买的次数"
result['user_gender_not_buy_count'] = result['user_gender_count'] - result['user_gender_buy_count']
return result
def get_user_occupation_feat(data,dataFeat):
"user_occupation的特征提取"
result = pd.DataFrame(dataFeat['user_occupation_id'])
result = result.drop_duplicates(['user_occupation_id'],keep='first')
"1.统计user_occupation出现次数"
dataFeat['user_occupation_count'] = dataFeat['user_occupation_id']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_count',aggfunc='count').reset_index()
del dataFeat['user_occupation_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"2.统计user_occupation历史被购买的次数"
dataFeat['user_occupation_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_occupation_buy_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"3.统计user_occupation转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_occupation_buy_count,result.user_occupation_count))
result['user_occupation_buy_ratio'] = buy_ratio
"4.统计user_occupation历史未被够买的次数"
result['user_occupation_not_buy_count'] = result['user_occupation_count'] - result['user_occupation_buy_count']
return result
def get_context_page_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['context_page_id'])
result = result.drop_duplicates(['context_page_id'],keep='first')
"1.统计context_page出现次数"
dataFeat['context_page_count'] = dataFeat['context_page_id']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_count',aggfunc='count').reset_index()
del dataFeat['context_page_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"2.统计context_page历史被购买的次数"
dataFeat['context_page_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_page_buy_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"3.统计context_page转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_page_buy_count,result.context_page_count))
result['context_page_buy_ratio'] = buy_ratio
"4.统计context_page历史未被够买的次数"
result['context_page_not_buy_count'] = result['context_page_count'] - result['context_page_buy_count']
return result
def get_shop_review_num_level_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['shop_review_num_level'])
result = result.drop_duplicates(['shop_review_num_level'],keep='first')
"1.统计shop_review_num_level出现次数"
dataFeat['shop_review_num_level_count'] = dataFeat['shop_review_num_level']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['shop_review_num_level_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"2.统计shop_review_num_level历史被购买的次数"
dataFeat['shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"3.统计shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_review_num_level_buy_count,result.shop_review_num_level_count))
result['shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计shop_review_num_level历史未被够买的次数"
result['shop_review_num_level_not_buy_count'] = result['shop_review_num_level_count'] - result['shop_review_num_level_buy_count']
return result
def get_item_category_list_2_feat(data,dataFeat):
"item_category_list_2的特征提取"
result = pd.DataFrame(dataFeat['item_category_list_2'])
result = result.drop_duplicates(['item_category_list_2'],keep='first')
"1.统计item_category_list_2出现次数"
dataFeat['item_category_list_2_count'] = dataFeat['item_category_list_2']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_count',aggfunc='count').reset_index()
del dataFeat['item_category_list_2_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"2.统计item_category_list_2历史被购买的次数"
dataFeat['item_category_list_2_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_category_list_2_buy_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"3.统计item_category_list_2转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_category_list_2_buy_count,result.item_category_list_2_count))
result['item_category_list_2_buy_ratio'] = buy_ratio
"4.统计item_category_list_2历史未被够买的次数"
result['item_category_list_2_not_buy_count'] = result['item_category_list_2_count'] - result['item_category_list_2_buy_count']
return result
def get_user_item_feat(data,dataFeat):
"user-item的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_id']])
result = result.drop_duplicates(['user_id','item_id'],keep='first')
"1.统计user-item出现次数"
dataFeat['user_item_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_count',aggfunc='count').reset_index()
del dataFeat['user_item_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"2.统计user-item历史被购买的次数"
dataFeat['user_item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_buy_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"3.统计user-item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_buy_count,result.user_item_count))
result['user_item_buy_ratio'] = buy_ratio
"4.统计user-item历史未被够买的次数"
result['user_item_not_buy_count'] = result['user_item_count'] - result['user_item_buy_count']
return result
def get_user_shop_feat(data,dataFeat):
"user-shop的特征提取"
result = pd.DataFrame(dataFeat[['user_id','shop_id']])
result = result.drop_duplicates(['user_id','shop_id'],keep='first')
"1.统计user-shop出现次数"
dataFeat['user_shop_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_count',aggfunc='count').reset_index()
del dataFeat['user_shop_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"2.统计user-shop历史被购买的次数"
dataFeat['user_shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"3.统计user-shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_buy_count,result.user_shop_count))
result['user_shop_buy_ratio'] = buy_ratio
"4.统计user-shop历史未被够买的次数"
result['user_shop_not_buy_count'] = result['user_shop_count'] - result['user_shop_buy_count']
return result
def get_user_context_feat(data,dataFeat):
"user-context的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_id']])
result = result.drop_duplicates(['user_id','context_id'],keep='first')
"1.统计user-context出现次数"
dataFeat['user_context_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_count',aggfunc='count').reset_index()
del dataFeat['user_context_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"2.统计user-context历史被购买的次数"
dataFeat['user_context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_buy_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"3.统计user-context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_buy_count,result.user_context_count))
result['user_context_buy_ratio'] = buy_ratio
"4.统计user-context历史未被够买的次数"
result['user_context_not_buy_count'] = result['user_context_count'] - result['user_context_buy_count']
return result
def get_user_timestamp_feat(data,dataFeat):
"user-context_timestamp的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_timestamp']])
result = result.drop_duplicates(['user_id','context_timestamp'],keep='first')
"1.统计user-context_timestamp出现次数"
dataFeat['user_context_timestamp_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_timestamp'],values='user_context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['user_context_timestamp_count']
result = pd.merge(result,feat,on=['user_id','context_timestamp'],how='left')
"2.统计user-context_timestamp历史被购买的次数"
dataFeat['user_context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_timestamp'],values='user_context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_timestamp_buy_count']
result = pd.merge(result,feat,on=['user_id','context_timestamp'],how='left')
"3.统计user-context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_timestamp_buy_count,result.user_context_timestamp_count))
result['user_context_timestamp_buy_ratio'] = buy_ratio
"4.统计user-context_timestamp历史未被够买的次数"
result['user_context_timestamp_not_buy_count'] = result['user_context_timestamp_count'] - result['user_context_timestamp_buy_count']
return result
def get_user_item_brand_feat(data,dataFeat):
"user-item_brand的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_brand_id']])
result = result.drop_duplicates(['user_id','item_brand_id'],keep='first')
"1.统计user-item_brand_id出现次数"
dataFeat['user_item_brand_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_brand_id'],values='user_item_brand_id_count',aggfunc='count').reset_index()
del dataFeat['user_item_brand_id_count']
result = pd.merge(result,feat,on=['user_id','item_brand_id'],how='left')
"2.统计user-item_brand_id历史被购买的次数"
dataFeat['user_item_brand_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_brand_id'],values='user_item_brand_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_brand_id_buy_count']
result = pd.merge(result,feat,on=['user_id','item_brand_id'],how='left')
"3.统计user-item_brand_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_brand_id_buy_count,result.user_item_brand_id_count))
result['user_item_brand_id_buy_ratio'] = buy_ratio
"4.统计user-item_brand_id历史未被够买的次数"
result['user_item_brand_id_not_buy_count'] = result['user_item_brand_id_count'] - result['user_item_brand_id_buy_count']
return result
def get_user_user_gender_feat(data,dataFeat):
"user-user_gender的特征提取"
result = pd.DataFrame(dataFeat[['user_id','user_gender_id']])
result = result.drop_duplicates(['user_id','user_gender_id'],keep='first')
"1.统计user-user_gender_id出现次数"
dataFeat['user_user_gender_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','user_gender_id'],values='user_user_gender_id_count',aggfunc='count').reset_index()
del dataFeat['user_user_gender_id_count']
result = pd.merge(result,feat,on=['user_id','user_gender_id'],how='left')
"2.统计user-user_gender_id历史被购买的次数"
dataFeat['user_user_gender_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','user_gender_id'],values='user_user_gender_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_user_gender_id_buy_count']
result = pd.merge(result,feat,on=['user_id','user_gender_id'],how='left')
"3.统计user-user_gender_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_user_gender_id_buy_count,result.user_user_gender_id_count))
result['user_user_gender_id_buy_ratio'] = buy_ratio
"4.统计user-user_gender_id历史未被够买的次数"
result['user_user_gender_id_not_buy_count'] = result['user_user_gender_id_count'] - result['user_user_gender_id_buy_count']
return result
def get_user_item_city_feat(data,dataFeat):
"user-item_city的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_city_id']])
result = result.drop_duplicates(['user_id','item_city_id'],keep='first')
"1.统计user-item_city_id出现次数"
dataFeat['user_item_city_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_city_id'],values='user_item_city_id_count',aggfunc='count').reset_index()
del dataFeat['user_item_city_id_count']
result = pd.merge(result,feat,on=['user_id','item_city_id'],how='left')
"2.统计user-item_city_id历史被购买的次数"
dataFeat['user_item_city_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_city_id'],values='user_item_city_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_city_id_buy_count']
result = pd.merge(result,feat,on=['user_id','item_city_id'],how='left')
"3.统计user-item_city_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_city_id_buy_count,result.user_item_city_id_count))
result['user_item_city_id_buy_ratio'] = buy_ratio
"4.统计user-item_city_id历史未被够买的次数"
result['user_item_city_id_not_buy_count'] = result['user_item_city_id_count'] - result['user_item_city_id_buy_count']
return result
def get_user_context_page_feat(data,dataFeat):
"user-context_page的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_page_id']])
result = result.drop_duplicates(['user_id','context_page_id'],keep='first')
"1.统计user-context_page_id出现次数"
dataFeat['user_context_page_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_page_id'],values='user_context_page_id_count',aggfunc='count').reset_index()
del dataFeat['user_context_page_id_count']
result = pd.merge(result,feat,on=['user_id','context_page_id'],how='left')
"2.统计user-context_page_id历史被购买的次数"
dataFeat['user_context_page_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_page_id'],values='user_context_page_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_page_id_buy_count']
result = pd.merge(result,feat,on=['user_id','context_page_id'],how='left')
"3.统计user-context_page_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_page_id_buy_count,result.user_context_page_id_count))
result['user_context_page_id_buy_ratio'] = buy_ratio
"4.统计user-context_page_id历史未被够买的次数"
result['user_context_page_id_not_buy_count'] = result['user_context_page_id_count'] - result['user_context_page_id_buy_count']
return result
def get_user_user_occupation_feat(data,dataFeat):
"user-user_occupation的特征提取"
result = pd.DataFrame(dataFeat[['user_id','user_occupation_id']])
result = result.drop_duplicates(['user_id','user_occupation_id'],keep='first')
"1.统计user-user_occupation_id出现次数"
dataFeat['user_user_occupation_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','user_occupation_id'],values='user_user_occupation_id_count',aggfunc='count').reset_index()
del dataFeat['user_user_occupation_id_count']
result = pd.merge(result,feat,on=['user_id','user_occupation_id'],how='left')
"2.统计user-user_occupation_id历史被购买的次数"
dataFeat['user_user_occupation_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','user_occupation_id'],values='user_user_occupation_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_user_occupation_id_buy_count']
result = pd.merge(result,feat,on=['user_id','user_occupation_id'],how='left')
"3.统计user-user_occupation_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_user_occupation_id_buy_count,result.user_user_occupation_id_count))
result['user_user_occupation_id_buy_ratio'] = buy_ratio
"4.统计user-user_occupation_id历史未被够买的次数"
result['user_user_occupation_id_not_buy_count'] = result['user_user_occupation_id_count'] - result['user_user_occupation_id_buy_count']
return result
def get_user_shop_review_num_level_feat(data,dataFeat):
"user-shop_review_num_level的特征提取"
result = pd.DataFrame(dataFeat[['user_id','shop_review_num_level']])
result = result.drop_duplicates(['user_id','shop_review_num_level'],keep='first')
"1.统计user-shop_review_num_level出现次数"
dataFeat['user_shop_review_num_level_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_review_num_level'],values='user_shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['user_shop_review_num_level_count']
result = pd.merge(result,feat,on=['user_id','shop_review_num_level'],how='left')
"2.统计user-shop_review_num_level历史被购买的次数"
dataFeat['user_shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_review_num_level'],values='user_shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_review_num_level'],how='left')
"3.统计user-shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_review_num_level_buy_count,result.user_shop_review_num_level_count))
result['user_shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计user-shop_review_num_level历史未被够买的次数"
result['user_shop_review_num_level_not_buy_count'] = result['user_shop_review_num_level_count'] - result['user_shop_review_num_level_buy_count']
return result
def get_user_item_category_list_2_feat(data,dataFeat):
"user-item_category_list_2的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_category_list_2']])
result = result.drop_duplicates(['user_id','item_category_list_2'],keep='first')
"1.统计user-item_category_list_2出现次数"
dataFeat['user_item_category_list_2_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_category_list_2'],values='user_item_category_list_2_count',aggfunc='count').reset_index()
del dataFeat['user_item_category_list_2_count']
result = pd.merge(result,feat,on=['user_id','item_category_list_2'],how='left')
"2.统计user-item_category_list_2历史被购买的次数"
dataFeat['user_item_category_list_2_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_category_list_2'],values='user_item_category_list_2_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_category_list_2_buy_count']
result = pd.merge(result,feat,on=['user_id','item_category_list_2'],how='left')
"3.统计user-item_category_list_2转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_category_list_2_buy_count,result.user_item_category_list_2_count))
result['user_item_category_list_2_buy_ratio'] = buy_ratio
"4.统计user-item_category_list_2历史未被够买的次数"
result['user_item_category_list_2_not_buy_count'] = result['user_item_category_list_2_count'] - result['user_item_category_list_2_buy_count']
return result
def merge_feat(data,dataFeat):
"特征的merge"
#生成特征
item = get_item_feat(data,dataFeat)
user = get_user_feat(data,dataFeat)
context = get_context_feat(data,dataFeat)
shop = get_shop_feat(data,dataFeat)
timestamp = get_timestamp_feat(data,dataFeat)
item_brand = get_item_brand_feat(data,dataFeat)
user_gender = get_user_gender_feat(data,dataFeat)
item_city = get_item_city_feat(data,dataFeat)
context_page = get_context_page_feat(data,dataFeat)
user_occupation = get_user_occupation_feat(data,dataFeat)
shop_review_num_level = get_shop_review_num_level_feat(data,dataFeat)
item_category_list_2 = get_item_category_list_2_feat(data,dataFeat)
    # Interaction features
user_item = get_user_item_feat(data,dataFeat)
user_shop = get_user_shop_feat(data,dataFeat)
user_context = get_user_context_feat(data,dataFeat)
user_timestamp = get_user_timestamp_feat(data,dataFeat)
user_item_brand = get_user_item_brand_feat(data,dataFeat)
user_user_gender = get_user_user_gender_feat(data,dataFeat)
user_item_city = get_user_item_city_feat(data,dataFeat)
user_context_page = get_user_context_page_feat(data,dataFeat)
user_user_occupation = get_user_user_occupation_feat(data,dataFeat)
user_shop_review_num_level = get_user_shop_review_num_level_feat(data,dataFeat)
user_item_category_list_2 = get_user_item_category_list_2_feat(data,dataFeat)
    # Merge single-key features
data = pd.merge(data,item,on='item_id',how='left')
data = pd.merge(data,user,on='user_id',how='left')
data = pd.merge(data,context,on='context_id',how='left')
data = pd.merge(data,timestamp,on='context_timestamp',how='left')
data = pd.merge(data,shop,on='shop_id',how='left')
data = pd.merge(data,item_brand,on='item_brand_id',how='left')
data = pd.merge(data,user_gender,on='user_gender_id',how='left')
data = pd.merge(data,item_city,on='item_city_id',how='left')
data = pd.merge(data,context_page,on='context_page_id',how='left')
data = pd.merge(data,user_occupation,on='user_occupation_id',how='left')
data = pd.merge(data,shop_review_num_level,on='shop_review_num_level',how='left')
data = pd.merge(data,item_category_list_2,on='item_category_list_2',how='left')
    # Merge interaction features
data = pd.merge(data,user_item,on=['user_id','item_id'],how='left')
data = pd.merge(data,user_shop,on=['user_id','shop_id'],how='left')
data = pd.merge(data,user_context,on=['user_id','context_id'],how='left')
data = pd.merge(data,user_timestamp,on=['user_id','context_timestamp'],how='left')
data = pd.merge(data,user_item_brand,on=['user_id','item_brand_id'],how='left')
data = pd.merge(data,user_user_gender,on=['user_id','user_gender_id'],how='left')
data = pd.merge(data,user_item_city,on=['user_id','item_city_id'],how='left')
data = pd.merge(data,user_context_page,on=['user_id','context_page_id'],how='left')
    data = pd.merge(data,user_user_occupation,on=['user_id','user_occupation_id'],how='left')
import pandas as pd
import numpy as np
import itertools
import warnings
import scipy.cluster.hierarchy as sch
from scipy.spatial import distance
from joblib import Parallel, delayed
__all__ = ['hcluster_tally',
'neighborhood_tally',
'running_neighborhood_tally',
'any_cluster_tally']
"""TODO:
* Incorporate running_neighbors into TCRdist, wrapping the standard metrics so they can work
easily.
* Verify that running_neighbor uses all CPUs and less memory: see how it could be further optimized
with joblib caching, expecially for the metrics that include the CDR2 and CDR1.5 etc.
"""
def _counts_to_cols(counts):
"""Encodes the counts Series as columns that can be added to a takky result row
Example counts table:
trait1 trait2 cmember
0 0 0 233
1 226
1 0 71
1 79
1 0 0 0
1 0
1 0 0
1 9"""
j = 0
cols = tuple(counts.index.names)
levels = []
for name, lev in zip(counts.index.names, counts.index.levels):
if len(lev) == 1:
"""This solves the problem of when a variable with one level is included
by accident or e.g. all instances are cmember = 1 (top node, big R)"""
if name == 'cmember':
levels.append(('MEM+', 'MEM-'))
elif isinstance(lev[0], int):
levels.append(tuple(sorted((0, lev[0]))))
else:
levels.append(tuple(sorted(('REF', lev[0]))))
else:
levels.append(tuple(lev))
levels = tuple(levels)
out = {'ct_columns':cols}
for xis in itertools.product(*(range(len(u)) for u in levels)):
vals = []
for ui, (col, u, xi) in enumerate(zip(counts.index.names, levels, xis)):
vals.append(u[xi])
try:
ct = counts.loc[tuple(vals)]
except (pd.core.indexing.IndexingError, KeyError):
ct = 0
out.update({'val_%d' % j:tuple(vals),
'ct_%d' % j:ct})
j += 1
return out
def _dict_to_nby2(d):
"""Takes the encoded columns of counts from a results row and re-creates the counts table"""
cols = d['ct_columns']
n = np.max([int(k.split('_')[1]) for k in d if 'val_' in k]) + 1
cts = [d['ct_%d' % j] for j in range(n)]
idx = pd.MultiIndex.from_tuples([d['val_%d' % j] for j in range(n)], names=cols)
counts = pd.Series(cts, index=idx)
return counts
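def _example_counts_roundtrip():
    """Hedged sketch (illustrative only): encode a toy trait-by-membership tally with
    _counts_to_cols and rebuild the same Series with _dict_to_nby2."""
    toy = pd.DataFrame({'trait1': [0, 0, 1, 1],
                        'cmember': ['MEM+', 'MEM-', 'MEM+', 'MEM-'],
                        'count': [3, 5, 2, 7]})
    counts = toy.groupby(['trait1', 'cmember'])['count'].agg(np.sum)
    encoded = _counts_to_cols(counts)
    return _dict_to_nby2(encoded)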
def _prep_counts(cdf, xcols, ycol, count_col):
"""Returns a dict with keys that can be added to a result row to store tallies
For a 2x2 table the data is encoded as follows
X+MEM+ encodes the first level in Y (cluster membership = MEM+) and X
and out contains columns named val_j and ct_j where j is ravel order, such that
the values of a 2x2 table (a, b, c, d) are:
ct_0 X-MEM+ a First level of X and a cluster member ("M+" which sorts before "M-" so is also first level)
ct_1 X-MEM- b First level of X and a non member
ct_2 X+MEM+ c Second level of X and a cluster member
ct_3 X+MEM- d Second level of X and a non member
    val_j also encodes explicitly the values of the X levels and cluster membership indicator (MEM+ = member)
This means that an OR > 1 is enrichment of the SECOND level of X in the cluster.
Longer tables are stored in ravel order with ct_j/val_j pairs with val_j containing the values
of each column/variable.
Key "ct_columns" contains the xcols and ycol as a list
    Key "levels" contains the levels of xcols and ycol as lists from a pd.Series.MultiIndex"""
counts = cdf.groupby(xcols + [ycol], sort=True)[count_col].agg(np.sum)
out = _counts_to_cols(counts)
counts = _dict_to_nby2(out)
out['levels'] = [list(lev) for lev in counts.index.levels]
if len(xcols) == 1 and counts.shape[0] == 4:
"""For a 2x2 add helpful count and probability columns
Note that the first level of a column/variable is "negative"
because its index in levels is 0"""
n = counts.sum()
levels = counts.index.levels
tmp = {'X+MEM+':counts[(levels[0][1], 'MEM+')],
'X+MEM-':counts[(levels[0][1], 'MEM-')],
'X-MEM+':counts[(levels[0][0], 'MEM+')],
'X-MEM-':counts[(levels[0][0], 'MEM-')]}
with warnings.catch_warnings():
warnings.simplefilter('ignore')
tmp.update({'X_marg':(tmp['X+MEM+'] + tmp['X+MEM-']) / n,
'MEM_marg':(tmp['X+MEM+'] + tmp['X-MEM+']) / n,
'X|MEM+':tmp['X+MEM+'] / (tmp['X+MEM+'] + tmp['X-MEM+']),
'X|MEM-':tmp['X+MEM-'] / (tmp['X+MEM-'] + tmp['X-MEM-']),
'MEM|X+':tmp['X+MEM+'] / (tmp['X+MEM+'] + tmp['X+MEM-']),
'MEM|X-':tmp['X-MEM+'] / (tmp['X-MEM+'] + tmp['X-MEM-'])})
out.update(tmp)
return out
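def _example_prep_counts():
    """Hedged sketch (illustrative only): one binary trait vs. cluster membership, so the
    returned dict carries the 2x2 helper fields (X+MEM+, X_marg, MEM|X+, etc.)."""
    cdf = pd.DataFrame({'trait1': [0, 0, 1, 1, 1],
                        'cmember': ['MEM+', 'MEM-', 'MEM+', 'MEM-', 'MEM+'],
                        'count': [1, 1, 1, 1, 1]})
    return _prep_counts(cdf, xcols=['trait1'], ycol='cmember', count_col='count')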
def neighborhood_tally(df_pop, pwmat, x_cols, df_centroids=None, count_col='count', knn_neighbors=50, knn_radius=None):
"""Forms a cluster around each row of df and tallies the number of instances with/without traits
in x_cols. The contingency table for each cluster/row of df can be used to test for enrichments of the traits
in x_cols with the distances between each row provided in pwmat. The neighborhood is defined by the K closest neighbors
using pairwise distances in pwmat, or defined by a distance radius.
For TCR analysis this can be used to test whether the TCRs in a neighborhood are associated with a certain trait or
phenotype. You can use hier_diff.cluster_association_test with the output of this function to test for
significnt enrichment.
Note on output: val_j/ct_j pairs provide the counts for each element of the n x 2 continency table where the last
dimension is always 'cmember' (MEM+ or MEM-) indicating cluster membership for each row. The X+MEM+ notation
is provided for convenience for 2x2 tables and X+ indicates the second level of x_col when sorted (e.g. 1 for [0, 1]).
Params
------
df_pop : pd.DataFrame [nclones x metadata]
Contains metadata for each clone in the population to be tallied.
pwmat : np.ndarray [df_centroids.shape[0] x df_pop.shape[0]]
Pairwise distance matrix for defining neighborhoods.
Number of rows in pwmat must match the number of rows in df_centroids,
which may be the number of rows in df_pop if df_centroids=None
x_cols : list
List of columns to be tested for association with the neighborhood
df_centroids : pd.DataFrame [nclones x 1]
An optional DataFrame containing clones that will act as centroids in the
neighborhood clustering. These can be a subset of df_pop or not, however
the number of rows in df_centroids must match the number of rows in pwmat.
If df_centroids=None then df_centroids = df_pop and all clones in df_pop
are used.
count_col : str
Column in df that specifies counts.
Default none assumes count of 1 cell for each row.
knn_neighbors : int
Number of neighbors to include in the neighborhood, or fraction of all data if K < 1
knn_radius : float
Radius for inclusion of neighbors within the neighborhood.
Specify K or R but not both.
Returns
-------
res_df : pd.DataFrame [nclones x results]
Counts of clones within each neighborhood, grouped by x_cols.
The "neighbors" column provides the pd.DataFrame indices of the elements in
df_pop that are within the neighborhood of each centroid (not the integer/vector
based indices)"""
if knn_neighbors is None and knn_radius is None:
raise(ValueError('Must specify K or radius'))
if not knn_neighbors is None and not knn_radius is None:
raise(ValueError('Must specify K or radius (not both)'))
if df_centroids is None:
df_centroids = df_pop
if pwmat.shape[0] != df_pop.shape[0]:
raise ValueError(f'Number of rows in pwmat {pwmat.shape[0]} does not match df_pop {df_pop.shape[0]}')
if pwmat.shape[1] != df_pop.shape[0]:
raise ValueError(f'Number of columns in pwmat {pwmat.shape[1]} does not match df_pop {df_pop.shape[0]}')
else:
if pwmat.shape[0] != df_centroids.shape[0]:
raise ValueError(f'Number of rows in pwmat {pwmat.shape[0]} does not match df_centroids {df_centroids.shape[0]}')
if pwmat.shape[1] != df_pop.shape[0]:
raise ValueError(f'Number of columns in pwmat {pwmat.shape[1]} does not match df_pop {df_pop.shape[0]}')
    if count_col is None:
        df_pop = df_pop.assign(count=1)
        count_col = 'count'
ycol = 'cmember'
res = []
for ii in range(df_centroids.shape[0]):
if not knn_neighbors is None:
if knn_neighbors < 1:
frac = knn_neighbors
K = int(knn_neighbors * df_pop.shape[0])
# print('Using K = %d (%1.0f%% of %d)' % (K, 100*frac, n))
else:
K = int(knn_neighbors)
R = np.partition(pwmat[ii, :], K)[K]
else:
R = knn_radius
y_lu = {True:'MEM+', False:'MEM-'}
y_float = (pwmat[ii, :] <= R).astype(float)
y = np.array([y_lu[yy] for yy in y_float])
K = int(np.sum(y_float))
cdf = df_pop.assign(**{ycol:y})[[ycol, count_col] + x_cols]
out = _prep_counts(cdf, x_cols, ycol, count_col)
out.update({'index':ii,
'neighbors':list(df_pop.index[np.nonzero(y_float)[0]]),
'K_neighbors':K,
'R_radius':R})
res.append(out)
res_df = pd.DataFrame(res)
return res_df
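def _example_neighborhood_tally():
    """Hedged usage sketch (illustrative only): random 2D points stand in for clone
    distances and 'trait' is a fabricated binary column; returns one row per centroid."""
    rng = np.random.RandomState(0)
    demo = pd.DataFrame({'trait': rng.randint(0, 2, 30), 'count': 1})
    pw = distance.squareform(distance.pdist(rng.rand(30, 2)))
    return neighborhood_tally(demo, pw, x_cols=['trait'],
                              count_col='count', knn_neighbors=10)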
def any_cluster_tally(df, cluster_df, x_cols, cluster_ind_col='neighbors', count_col='count'):
"""Tallies clones inside (outside) each cluster for testing enrichment of other categorical
variables defined by x_cols in df. Clusters are defined in cluster_df using the cluster_ind_col
(default: 'neighbors') which should contain *positional* indices into df for cluster members.
This function only organizes the counts for testing such that each row of the output represents
a cluster that could be tested for enrichment.
As an example, one could use Fisher's exact test to detect enrichment/association of the
neighborhood/cluster with one variable.
Tests the 2 x 2 table for each clone:
+----+----+-------+--------+
| | Cluster |
| +-------+--------+
| | Y | N |
+----+----+-------+--------+
|VAR | 1 | a | b |
| +----+-------+--------+
| | 0 | c | d |
+----+----+-------+--------+
This and other tests are available with the cluster_association_test function that takes the output
of this function as input.
Params
------
df : pd.DataFrame [nclones x metadata]
Contains metadata for each clone.
cluster_df : pd.DataFrame, one row per cluster
Contains the column in cluster_ind_col (default: "neighbors") that should
contain positional indices into df indicating cluster membership
x_cols : list
List of columns to be tested for association with the neighborhood
count_col : str
Column in df that specifies counts.
Default none assumes count of 1 cell for each row.
cluster_ind_col : str, column in cluster_df
Values should be lists or tuples of positional indices into df
Returns
-------
res_df : pd.DataFrame [nclusters x results]
A 2xN table for each cluster."""
ycol = 'cmember'
if count_col is None:
df = df.assign(count=1)
count_col = 'count'
n = df.shape[0]
res = []
    for cid, m in cluster_df[cluster_ind_col].items():
not_m = [i for i in range(n) if not i in m]
        y_float = np.zeros(n, dtype=int)
y_float[m] = 1
y_lu = {1:'MEM+', 0:'MEM-'}
y = np.array([y_lu[yy] for yy in y_float])
K = int(np.sum(y_float))
cdf = df.assign(**{ycol:y})[[ycol, count_col] + x_cols]
out = _prep_counts(cdf, x_cols, ycol, count_col)
out.update({'cid':cid,
'neighbors':list(df.index[m]),
'neighbors_i':m,
'K_neighbors':K})
res.append(out)
res_df = pd.DataFrame(res)
return res_df
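def _example_any_cluster_tally():
    """Hedged usage sketch (illustrative only): two hand-made clusters given as positional
    indices into a fabricated 8-clone DataFrame."""
    demo = pd.DataFrame({'trait': [0, 1, 0, 1, 1, 0, 1, 0], 'count': 1})
    clusters = pd.DataFrame({'neighbors': [[0, 1, 2, 3], [4, 5, 6, 7]]})
    return any_cluster_tally(demo, clusters, x_cols=['trait'])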
def hcluster_tally(df, pwmat, x_cols, Z=None, count_col='count', subset_ind=None, method='complete', optimal_ordering=True):
"""Hierarchical clustering of clones with distances in pwmat. Tallies clones inside (outside) each cluster in preparation
for testing enrichment of other categorical variables defined by x_cols. This function only organizes the counts for testing
such that each row of the output represents a cluster that could be tested for enrichment.
One example test is Fisher's exact test to detect enrichment/association of the neighborhood/cluster
with one binary variable.
Tests the 2 x 2 table for each clone:
+----+----+-------+--------+
| | Cluster |
| +-------+--------+
| | Y | N |
+----+----+-------+--------+
|VAR | 1 | a | b |
| +----+-------+--------+
| | 0 | c | d |
+----+----+-------+--------+
This and other tests are available with the cluster_association_test function that takes the output
of this function as input.
Params
------
df : pd.DataFrame [nclones x metadata]
Contains metadata for each clone.
pwmat : np.ndarray [nclones x nclones]
Square or compressed (see scipy.spatial.distance.squareform) distance
matrix for defining clusters.
x_cols : list
List of columns to be tested for association with the neighborhood
count_col : str
Column in df that specifies counts.
Default none assumes count of 1 cell for each row.
subset_ind : partial index of df, optional
Provides option to tally counts only within a subset of df, but to maintain the clustering
of all individuals. Allows for one clustering of pooled TCRs,
but tallying/testing within a subset (e.g. participants or conditions)
optimal_ordering : bool
If True, the linkage matrix will be reordered so that the distance between successive
leaves is minimal. This results in a more intuitive tree structure when the data are
        visualized. Defaults to True here, though this algorithm can be slow, particularly on large datasets.
Returns
-------
res_df : pd.DataFrame [nclusters x results]
A 2xN table for each cluster.
Z : linkage matrix [nclusters, df.shape[0] - 1, 4]
Clustering result returned from scipy.cluster.hierarchy.linkage"""
ycol = 'cmember'
if Z is None:
if pwmat.shape[0] == pwmat.shape[1] and pwmat.shape[0] == df.shape[0]:
compressed = distance.squareform(pwmat)
else:
compressed = pwmat
pwmat = distance.squareform(pwmat)
Z = sch.linkage(compressed, method=method, optimal_ordering=optimal_ordering)
else:
"""Shape of correct Z asserted here"""
        if Z.shape != (df.shape[0] - 1, 4):
            raise ValueError('Shape of Z %s does not match the expected (%d, 4) for a df with %d rows' % (str(Z.shape), df.shape[0] - 1, df.shape[0]))
if count_col is None:
df = df.assign(count=1)
count_col = 'count'
clusters = {}
for i, merge in enumerate(Z):
"""Cluster ID number starts at a number after all the leaves"""
cid = 1 + i + Z.shape[0]
clusters[cid] = [merge[0], merge[1]]
def _get_indices(clusters, i):
if i <= Z.shape[0]:
return [int(i)]
else:
return _get_indices(clusters, clusters[i][0]) + _get_indices(clusters, clusters[i][1])
def _get_cluster_indices(clusters, i):
if i <= Z.shape[0]:
return []
else:
return [int(i)] + _get_cluster_indices(clusters, clusters[i][0]) + _get_cluster_indices(clusters, clusters[i][1])
members = {i:_get_indices(clusters, i) for i in range(Z.shape[0] + 1, max(clusters.keys()) + 1)}
"""Note that the list of clusters within each cluster includes the current cluster"""
cluster_members = {i:_get_cluster_indices(clusters, i) for i in range(Z.shape[0] + 1, max(clusters.keys()) + 1)}
n = df.shape[0]
res = []
"""Setting non-group counts to zero"""
    if subset_ind is not None:
        clone_tmp = df.copy()
        """Set counts to zero for all clones that are not in the group being tested"""
        not_ss = [ii for ii in df.index if ii not in subset_ind]
clone_tmp.loc[not_ss, count_col] = 0
else:
clone_tmp = df
for cid, m in members.items():
        not_m = [i for i in range(n) if i not in m]
        y_float = np.zeros(n, dtype=int)
y_float[m] = 1
y_lu = {1:'MEM+', 0:'MEM-'}
y = np.array([y_lu[yy] for yy in y_float])
K = int(np.sum(y_float))
R = np.max(pwmat[m, :][:, m])
cdf = clone_tmp.assign(**{ycol:y})[[ycol, count_col] + x_cols]
out = _prep_counts(cdf, x_cols, ycol, count_col)
out.update({'cid':cid,
'neighbors':list(clone_tmp.index[m]),
'neighbors_i':m,
'children':cluster_members[cid],
'K_neighbors':K,
'R_radius':R})
res.append(out)
res_df = pd.DataFrame(res)
return res_df, Z
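# Hypothetical usage sketch (not part of the original module): builds a toy dataset,
# runs hcluster_tally, and shows how one cluster's 2 x 2 membership table could be
# tested with Fisher's exact test. The 'value'/'trait' columns and the 2 x 2 counts
# below are invented for the illustration; only output columns set explicitly above
# ('cid', 'K_neighbors', 'R_radius') are relied upon.
def _example_hcluster_tally_usage():
    from scipy.spatial import distance as spdist
    from scipy.stats import fisher_exact

    toy = pd.DataFrame({'value': [0.0, 0.1, 0.2, 5.0, 5.1, 5.2],
                        'trait': ['A', 'A', 'B', 'B', 'B', 'B'],
                        'count': [1, 1, 1, 1, 1, 1]})
    pw = spdist.squareform(spdist.pdist(toy[['value']].values))
    res_df, Z = hcluster_tally(toy, pw, x_cols=['trait'], count_col='count')
    print(res_df[['cid', 'K_neighbors', 'R_radius']])

    # The a/b/c/d cells of the 2 x 2 table in the docstring map directly onto
    # scipy.stats.fisher_exact; these counts are made up for the sketch.
    odds_ratio, p_value = fisher_exact([[3, 1], [2, 6]])
    return odds_ratio, p_value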
def running_neighborhood_tally(df, dist_func, dist_cols, x_cols, count_col='count', knn_neighbors=50, knn_radius=None, cluster_ind=None, ncpus=1):
"""Forms a cluster around each row of df and tallies the number of instances with/without traits
in x_cols. The contingency table for each cluster/row of df can be used to test for enrichments of the traits
in x_cols. The neighborhood is defined by the K closest neighbors using dist_func, or defined by a distance radius.
Identical output to neighborhood_tally, however memory footprint will be lower for large datasets, at the cost of
increased computation. Computation is parallelized using joblib, with memory caching optional.
For TCR analysis this can be used to test whether the TCRs in a neighborhood are associated with a certain trait or
phenotype. You can use hier_diff.cluster_association_test with the output of this function to test for
    significant enrichment.
    Note on output: val_j/ct_j pairs provide the counts for each element of the n x 2 contingency table where the last
dimension is always 'cmember' (MEM+ or MEM-) indicating cluster membership for each row. The X+MEM+ notation
is provided for convenience for 2x2 tables and X+ indicates the second level of x_col when sorted (e.g. 1 for [0, 1]).
Params
------
df : pd.DataFrame [nclones x metadata]
Contains metadata for each clone.
dist_func : function
Function that accepts two dicts representing the two TCRs being compared,
as well as an optional third dict that will maintain a cache of components
of the distance that should be stored for fast, repeated access (e.g. pairwise
distances among CDR2 loops, which are much less diverse)
x_cols : list
List of columns to be tested for association with the neighborhood
    count_col : str
        Column in df that specifies counts.
        Default None assumes a count of 1 cell for each row.
    knn_neighbors : int or float
        Number of neighbors to include in the neighborhood, or a fraction of all rows if the value is < 1
knn_radius : float
Radius for inclusion of neighbors within the neighborhood.
Specify K or R but not both.
    dist_cols : list
        Columns of df used by dist_func when computing distances between rows.
    ncpus : int
        Number of parallel workers passed to joblib.Parallel.
cluster_ind : None or np.ndarray
Indices into df specifying the neighborhoods for testing.
Returns
-------
res_df : pd.DataFrame [nclones x results]
Results from testing the neighborhood around each clone."""
    if knn_neighbors is None and knn_radius is None:
        raise ValueError('Must specify knn_neighbors or knn_radius')
    if knn_neighbors is not None and knn_radius is not None:
        raise ValueError('Specify knn_neighbors or knn_radius, but not both')
if count_col is None:
df = df.assign(count=1)
count_col = 'count'
if cluster_ind is None:
cluster_ind = df.index
tally_params = dict(df=df,
dist_func=dist_func,
dist_cols=dist_cols,
x_cols=x_cols,
count_col=count_col,
knn_neighbors=knn_neighbors,
knn_radius=knn_radius)
res = Parallel(n_jobs=ncpus)(delayed(_tally_one)(clonei=clonei, **tally_params) for clonei in cluster_ind)
"""
res = []
for clonei in cluster_ind:
out = _tally_one(df, clonei, dist_func, dist_cols, x_cols, count_col, knn_neighbors, knn_radius)
res.append(out)
"""
res_df = | pd.DataFrame(res) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
        lhs, rhs = pair
        if reverse:
            # add lhs / rhs switched data
            lhs, rhs = rhs, lhs
        left = Series(lhs, dtype=dtype)
        right = box(rhs, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check comparisons against non-NaT scalar values
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeEror ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timetimedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
    # a PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
obj = box_with_array(ser)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(obj, nptd)
rhs = op(obj, pytd)
tm.assert_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
# When adding/subtracting an ndarray (which has no .freq), the result
# does not infer freq
idx = idx._with_freq(None)
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
for result in [idx + delta, np.add(idx, delta)]:
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
def test_dti_add_series(self, tz_naive_fixture, names):
# GH#13905
tz = tz_naive_fixture
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
)
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
def test_dti_addsub_offset_arraylike(
self, tz_naive_fixture, names, op, index_or_series
):
# GH#18849, GH#19744
other_box = index_or_series
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0])
other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
xbox = get_upcast_box(dti, other)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dti, other)
expected = DatetimeIndex(
[op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
@pytest.mark.parametrize("other_box", [pd.Index, np.array])
def test_dti_addsub_object_arraylike(
self, tz_naive_fixture, box_with_array, other_box
):
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)])
xbox = get_upcast_box(dtarr, other)
expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(PerformanceWarning):
result = dtarr + other
        tm.assert_equal(result, expected)
import operator
import warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, to_timedelta
import pandas._testing as tm
from pandas.core.algorithms import checked_add_with_arr
from .pandas_vb_common import numeric_dtypes
try:
import pandas.core.computation.expressions as expr
except ImportError:
import pandas.computation.expressions as expr
try:
import pandas.tseries.holiday
except ImportError:
pass
class IntFrameWithScalar:
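    # asv benchmark: `params` x `param_names` define the parameter grid; asv runs
    # `setup` once per combination and times each `time_*` method.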
params = [
[np.float64, np.int64],
[2, 3.0, np.int32(4), np.float64(5)],
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.pow,
operator.mod,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.lt,
operator.le,
],
]
param_names = ["dtype", "scalar", "op"]
def setup(self, dtype, scalar, op):
arr = np.random.randn(20000, 100)
self.df = DataFrame(arr.astype(dtype))
def time_frame_op_with_scalar(self, dtype, scalar, op):
op(self.df, scalar)
class OpWithFillValue:
def setup(self):
# GH#31300
arr = np.arange(10 ** 6)
df = DataFrame({"A": arr})
ser = df["A"]
self.df = df
self.ser = ser
def time_frame_op_with_fill_value_no_nas(self):
self.df.add(self.df, fill_value=4)
def time_series_op_with_fill_value_no_nas(self):
self.ser.add(self.ser, fill_value=4)
class MixedFrameWithSeriesAxis:
params = [
[
"eq",
"ne",
"lt",
"le",
"ge",
"gt",
"add",
"sub",
"truediv",
"floordiv",
"mul",
"pow",
]
]
param_names = ["opname"]
def setup(self, opname):
arr = np.arange(10 ** 6).reshape(1000, -1)
df = DataFrame(arr)
df["C"] = 1.0
self.df = df
self.ser = df[0]
self.row = df.iloc[0]
def time_frame_op_with_series_axis0(self, opname):
getattr(self.df, opname)(self.ser, axis=0)
def time_frame_op_with_series_axis1(self, opname):
getattr(operator, opname)(self.df, self.ser)
class Ops:
params = [[True, False], ["default", 1]]
param_names = ["use_numexpr", "threads"]
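    # Benchmarks DataFrame ops with numexpr enabled/disabled and with different
    # thread counts; `teardown` restores the numexpr defaults afterwards.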
def setup(self, use_numexpr, threads):
self.df = DataFrame(np.random.randn(20000, 100))
self.df2 = DataFrame(np.random.randn(20000, 100))
if threads != "default":
expr.set_numexpr_threads(threads)
if not use_numexpr:
expr.set_use_numexpr(False)
def time_frame_add(self, use_numexpr, threads):
self.df + self.df2
def time_frame_mult(self, use_numexpr, threads):
self.df * self.df2
def time_frame_multi_and(self, use_numexpr, threads):
self.df[(self.df > 0) & (self.df2 > 0)]
def time_frame_comparison(self, use_numexpr, threads):
self.df > self.df2
def teardown(self, use_numexpr, threads):
expr.set_use_numexpr(True)
expr.set_numexpr_threads()
class Ops2:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N, N))
self.df2 = DataFrame(np.random.randn(N, N))
self.df_int = DataFrame(
np.random.randint(
np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)
)
)
self.df2_int = DataFrame(
np.random.randint(
np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)
)
)
self.s = Series(np.random.randn(N))
# Division
def time_frame_float_div(self):
self.df // self.df2
def time_frame_float_div_by_zero(self):
self.df / 0
def time_frame_float_floor_by_zero(self):
self.df // 0
def time_frame_int_div_by_zero(self):
self.df_int / 0
# Modulo
def time_frame_int_mod(self):
self.df_int % self.df2_int
def time_frame_float_mod(self):
self.df % self.df2
# Dot product
def time_frame_dot(self):
self.df.dot(self.df2)
def time_series_dot(self):
self.s.dot(self.s)
def time_frame_series_dot(self):
self.df.dot(self.s)
class Timeseries:
params = [None, "US/Eastern"]
param_names = ["tz"]
def setup(self, tz):
N = 10 ** 6
halfway = (N // 2) - 1
self.s = Series(date_range("20010101", periods=N, freq="T", tz=tz))
self.ts = self.s[halfway]
self.s2 = Series(date_range("20010101", periods=N, freq="s", tz=tz))
def time_series_timestamp_compare(self, tz):
self.s <= self.ts
def time_timestamp_series_compare(self, tz):
self.ts >= self.s
def time_timestamp_ops_diff(self, tz):
self.s2.diff()
def time_timestamp_ops_diff_with_shift(self, tz):
self.s - self.s.shift()
class IrregularOps:
def setup(self):
N = 10 ** 5
idx = date_range(start="1/1/2000", periods=N, freq="s")
s = Series(np.random.randn(N), index=idx)
self.left = s.sample(frac=1)
self.right = s.sample(frac=1)
def time_add(self):
self.left + self.right
class TimedeltaOps:
def setup(self):
self.td = to_timedelta(np.arange(1000000))
self.ts = Timestamp("2000")
def time_add_td_ts(self):
self.td + self.ts
class CategoricalComparisons:
params = ["__lt__", "__le__", "__eq__", "__ne__", "__ge__", "__gt__"]
param_names = ["op"]
def setup(self, op):
N = 10 ** 5
self.cat = pd.Categorical(list("aabbcd") * N, ordered=True)
def time_categorical_op(self, op):
getattr(self.cat, op)("b")
class IndexArithmetic:
params = ["float", "int"]
param_names = ["dtype"]
def setup(self, dtype):
N = 10 ** 6
indexes = {"int": "makeIntIndex", "float": "makeFloatIndex"}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
def time_subtract(self, dtype):
self.index - 2
def time_multiply(self, dtype):
self.index * 2
def time_divide(self, dtype):
self.index / 2
def time_modulo(self, dtype):
self.index % 2
class NumericInferOps:
# from GH 7332
params = numeric_dtypes
param_names = ["dtype"]
def setup(self, dtype):
N = 5 * 10 ** 5
self.df = DataFrame(
{"A": np.arange(N).astype(dtype), "B": np.arange(N).astype(dtype)}
)
def time_add(self, dtype):
self.df["A"] + self.df["B"]
def time_subtract(self, dtype):
self.df["A"] - self.df["B"]
def time_multiply(self, dtype):
self.df["A"] * self.df["B"]
def time_divide(self, dtype):
self.df["A"] / self.df["B"]
def time_modulo(self, dtype):
self.df["A"] % self.df["B"]
class DateInferOps:
# from GH 7332
def setup_cache(self):
N = 5 * 10 ** 5
df = DataFrame({"datetime64": np.arange(N).astype("datetime64[ms]")})
df["timedelta"] = df["datetime64"] - df["datetime64"]
return df
def time_subtract_datetimes(self, df):
df["datetime64"] - df["datetime64"]
def time_timedelta_plus_datetime(self, df):
df["timedelta"] + df["datetime64"]
def time_add_timedeltas(self, df):
df["timedelta"] + df["timedelta"]
class AddOverflowScalar:
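    # checked_add_with_arr performs int64 addition with overflow detection;
    # this measures its cost when the second operand is a scalar.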
params = [1, -1, 0]
param_names = ["scalar"]
def setup(self, scalar):
N = 10 ** 6
self.arr = np.arange(N)
def time_add_overflow_scalar(self, scalar):
checked_add_with_arr(self.arr, scalar)
class AddOverflowArray:
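    # Same overflow-checked addition with array operands; the optional
    # arr_mask/b_mask boolean arrays mark positions to exclude from the check.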
def setup(self):
N = 10 ** 6
self.arr = np.arange(N)
self.arr_rev = np.arange(-N, 0)
        self.arr_mixed = np.array([1, -1]).repeat(N // 2)
self.arr_nan_1 = np.random.choice([True, False], size=N)
self.arr_nan_2 = np.random.choice([True, False], size=N)
def time_add_overflow_arr_rev(self):
checked_add_with_arr(self.arr, self.arr_rev)
def time_add_overflow_arr_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, arr_mask=self.arr_nan_1)
def time_add_overflow_b_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, b_mask=self.arr_nan_1)
def time_add_overflow_both_arg_nan(self):
checked_add_with_arr(
self.arr, self.arr_mixed, arr_mask=self.arr_nan_1, b_mask=self.arr_nan_2
)
hcal = pd.tseries.holiday.USFederalHolidayCalendar()
# These offsets currently raise a NotImplementedError with .apply_index()
non_apply = [
    pd.offsets.Day(),
# how to run locally
# python text_loc_data.py $(cat ../../debugcommand.txt)
import base64
import io
import json
import math
import os
import re
import sys
import warnings
from urllib.parse import quote
import cv2
import numpy as np
import pandas as pd
from deskew import determine_skew
from google.cloud import vision
from google.cloud.vision_v1 import types
warnings.filterwarnings("ignore")
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'ServiceAccountToken.json'
client = vision.ImageAnnotatorClient()
content = ""
img = ""
FILE_NAME = 'debugImage.png'
FOLDER_PATH = 'text_images'
def readb64(data):
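    # Decode a base64 string to raw bytes, view them as a uint8 buffer, and
    # let OpenCV decode that buffer into a BGR image array.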
    nparr = np.frombuffer(base64.b64decode(data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# print(type(img)) # <class 'numpy.ndarray'>
return img
def load_in_from_cli():
raw_data = sys.argv[1]
image_data = re.sub('^data:image/.+;base64,', '', raw_data)
with open("debugcommand.txt", "w") as file:
file.write(raw_data)
img = readb64(image_data)
content = base64.b64decode(image_data)
with open("debugImage.png", "wb") as fh: # saves to backend/debugImage.png
fh.write(content)
return content
def get_old_content():
# Read original image
with io.open(os.path.join(FOLDER_PATH, FILE_NAME), 'rb') as image_file:
content = image_file.read()
return content
try: # ez error handling
# if we have cli, load it - otherwise use the directories and whatnot
cli_mode = len(sys.argv) > 1
if (cli_mode):
content = load_in_from_cli()
else:
content = get_old_content()
image = types.Image(content=content)
response = client.text_detection(image=image)
if not cli_mode:
print(response)
with open('debug.txt', 'w') as file:
file.write(str(response))
file.close()
bound_text = response.text_annotations[0].bounding_poly
bound_width = bound_text.vertices[2].x - bound_text.vertices[0].x
bound_height = bound_text.vertices[2].y - bound_text.vertices[0].y
def deskew(img, theta, bkg):
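    # Rotate `img` by `theta` degrees about its center, growing the canvas and
    # shifting the rotation matrix translation so no corners are clipped;
    # `bkg` fills the exposed border.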
old_w, old_h = img.shape[:2]
theta_rad = math.radians(theta)
width = abs(np.sin(theta_rad) * old_h) + abs(np.cos(theta_rad) * old_w)
height = abs(np.sin(theta_rad) * old_w) + abs(np.cos(theta_rad) * old_h)
image_center = tuple(np.array(img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, theta, 1.0)
rot_mat[1, 2] += (width - old_w) / 2
rot_mat[0, 2] += (height - old_h) / 2
return cv2.warpAffine(img, rot_mat, (int(
round(height)), int(round(width))), borderValue=bkg)
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
angle = determine_skew(grayscale)
# Deskewed image
rotated = deskew(img, angle, (0, 0, 0))
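# If the full-text bounding box is taller than it is wide, the page is most
# likely rotated 90 degrees, so rotate the deskewed image upright.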
if bound_height > bound_width:
rotated = np.rot90(rotated)
deskewed_fname = os.path.join(FOLDER_PATH, ('deskewed_' + FILE_NAME + '.png'))
cv2.imwrite(deskewed_fname, rotated)
with io.open(deskewed_fname, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response = client.text_detection(image=image)
blocks = response.text_annotations[1:]
paras = response.full_text_annotation.pages[0].blocks[0].paragraphs
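# text_annotations[0] is the full detected text (used earlier for the overall
# bounding box); the remaining entries are per-word annotations.
# full_text_annotation drills down block -> paragraph -> word -> symbol.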
char_list = []
bound_poly = []
conf_scores = []
is_upper = []
words = []
bound_poly_w = []
conf_scores_w = []
for b in blocks:
words.append(b.description)
bound_poly_w.append(b.bounding_poly)
conf_scores_w.append(b.confidence)
for p in paras:
for w in p.words:
for c in w.symbols:
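            # A detected SPACE break ends a word: append the character plus an
            # 'SP' marker, duplicating its bounding box, confidence and case flag
            # so the four parallel lists stay the same length.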
if 'detected_break' in c.property and \
c.property.detected_break.type_.name == 'SPACE':
char_list.append(c.text)
char_list.append('SP')
for i in range(2):
bound_poly.append(c.bounding_box)
for i in range(2):
conf_scores.append(c.confidence)
if c.text.isupper():
for i in range(2):
is_upper.append(1)
else:
for i in range(2):
is_upper.append(0)
else:
if c.text.isupper():
is_upper.append(1)
else:
is_upper.append(0)
char_list.append(c.text)
bound_poly.append(c.bounding_box)
conf_scores.append(c.confidence)
df = pd.DataFrame({
'char': char_list,
'bound_poly': bound_poly,
'conf': conf_scores,
'is_upper': is_upper
})
words_df = pd.DataFrame({
'word': words,
'bound_poly': bound_poly_w,
'conf': conf_scores_w
})
def split_str(text):
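    # The BoundingPoly repr is a protobuf text dump with lines like 'x: 123' and
    # 'y: 45'; collect them into x_1..x_4 / y_1..y_4 keys, one pair per vertex.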
str_list = text.split('\n')
x_points = []
y_points = []
for s in str_list:
if 'x: ' in s:
x_points.append(s.strip())
elif 'y: ' in s:
y_points.append(s.strip())
x_dict = {}
y_dict = {}
for i, x in enumerate(x_points):
x_dict['x_' + str(i + 1)] = [
int(s) for s in x.split() if s.isdigit()
][0]
for i, y in enumerate(y_points):
y_dict['y_' + str(i + 1)] = [
int(s) for s in y.split() if s.isdigit()
][0]
x_dict.update(y_dict)
return x_dict
def merge_dicts(df):
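    # Parse each row's bounding polygon and pivot the per-row vertex dicts into
    # column lists keyed by x_1..y_4, ready to become DataFrame columns.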
pos_dicts = []
for idx, row in df['bound_poly'].iteritems():
pos_dicts.append(split_str(str(row)))
d_keys = list(pos_dicts[0].keys())
final_dict = {k: [] for k in d_keys}
for d in pos_dicts:
for k in d_keys:
final_dict[k].append(d[k])
return final_dict
merged_dicts = merge_dicts(df)
merged_dicts_df = pd.DataFrame(merged_dicts)
df = pd.concat([df.char, merged_dicts_df, df.conf, df.is_upper], axis=1)
merged_dicts_w = merge_dicts(words_df)
merged_dicts_w_df = pd.DataFrame(merged_dicts_w)
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
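        # For Series, compare each delegated property against the same property
        # on the underlying index; objects in `not_valid_objs` must raise.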
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
                exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
for i in self.indexes:
s = pd.Series(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(len(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = pd.Series(idx)
tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
tm.assert_equal(np.transpose(obj), obj)
with pytest.raises(ValueError, match=self.errmsg):
np.transpose(obj, axes=1)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
with pytest.raises(AttributeError):
t.b = "test"
assert not hasattr(t, "b")
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
| Timestamp("2011-01-02", tz="US/Eastern") | pandas.Timestamp |
# -*- coding: utf-8 -*-
from __future__ import division
from functools import wraps
import numpy as np
from pandas import DataFrame, Series
#from pandas.stats import moments
import pandas as pd
def simple_moving_average(prices, period=26):
"""
:param df: pandas dataframe object
:param period: periods for calculating SMA
:return: a pandas series
"""
weights = np.repeat(1.0, period) / period
sma = np.convolve(prices, weights, 'valid')
return sma
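# A tiny worked check of the convolution-based SMA (illustrative only, not from
# the original module; the sample prices are made up). Because 'valid' mode is
# used, the output is shorter than the input by period - 1:
#
#   >>> simple_moving_average([1, 2, 3, 4, 5], period=3)
#   array([2., 3., 4.])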
def stochastic_oscillator_k(df):
"""Calculate stochastic oscillator %K for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
SOk = pd.Series((df['close'] - df['low']) / (df['high'] - df['low']), name='SO%k')
df = df.join(SOk)
return df
def stochastic_oscillator_d(df, n):
"""Calculate stochastic oscillator %D for given data.
:param df: pandas.DataFrame
    :param n: span used to smooth %K into %D (via an EWM)
:return: pandas.DataFrame
"""
SOk = pd.Series((df['close'] - df['low']) / (df['high'] - df['low']), name='SO%k')
SOd = pd.Series(SOk.ewm(span=n, min_periods=n).mean(), name='SO%d')
df = df.join(SOd)
return df
def bollinger_bands(df, n, std, add_ave=True):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
ave = df['close'].rolling(window=n, center=False).mean()
sd = df['close'].rolling(window=n, center=False).std()
upband = pd.Series(ave + (sd * std), name='bband_upper_' + str(n))
dnband = pd.Series(ave - (sd * std), name='bband_lower_' + str(n))
if add_ave:
ave = pd.Series(ave, name='bband_ave_' + str(n))
df = df.join(pd.concat([upband, dnband, ave], axis=1))
else:
df = df.join(pd.concat([upband, dnband], axis=1))
return df
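# Illustrative usage sketch (the variable name `ohlc` is made up; it is assumed
# to be a DataFrame with a 'close' column): a 20-period band at 2 standard
# deviations adds the columns 'bband_upper_20', 'bband_lower_20', 'bband_ave_20':
#
#   ohlc = bollinger_bands(ohlc, n=20, std=2)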
def money_flow_index(df, n):
"""Calculate Money Flow Index and Ratio for given data.
:param df: pandas.DataFrame
    :param n: rolling window length for averaging the money-flow ratio
:return: pandas.DataFrame
"""
PP = (df['high'] + df['low'] + df['close']) / 3
i = 0
PosMF = [0]
while i < df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.loc[i + 1, 'volume'])
else:
PosMF.append(0)
i = i + 1
PosMF = pd.Series(PosMF)
TotMF = PP * df['volume']
MFR = pd.Series(PosMF / TotMF)
MFI = pd.Series(MFR.rolling(n, min_periods=n).mean())
# df = df.join(MFI)
return MFI
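# Note: the loop above assumes a default RangeIndex, since it compares the
# counter against df.index[-1]. A vectorized sketch of the same positive
# money-flow logic (shown only as an illustrative alternative, not part of the
# original implementation):
#
#   PP = (df['high'] + df['low'] + df['close']) / 3
#   raw_mf = PP * df['volume']
#   pos_mf = raw_mf.where(PP.diff() > 0, 0.0)
#   mfr = pos_mf / raw_mf
#   mfi = mfr.rolling(n, min_periods=n).mean()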
def series_indicator(col):
def inner_series_indicator(f):
@wraps(f)
def wrapper(s, *args, **kwargs):
if isinstance(s, DataFrame):
s = s[col]
return f(s, *args, **kwargs)
return wrapper
return inner_series_indicator
def _wilder_sum(s, n):
s = s.dropna()
nf = (n - 1) / n
ws = [np.nan] * (n - 1) + [s[n - 1] + nf * sum(s[:n - 1])]
for v in s[n:]:
ws.append(v + ws[-1] * nf)
return Series(ws, index=s.index)
@series_indicator('high')
def hhv(s, n):
return pd.rolling_max(s, n)
@series_indicator('low')
def llv(s, n):
return pd.rolling_min(s, n)
@series_indicator('close')
def ema(s, n, wilder=False):
span = n if not wilder else 2 * n - 1
return pd.ewma(s, span=span)
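# Why span = 2*n - 1 when wilder=True: pandas' EWM uses alpha = 2 / (span + 1),
# while Wilder's smoothing uses alpha = 1 / n, so span = 2*n - 1 gives
# alpha = 2 / (2*n) = 1 / n and the two are equivalent.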
@series_indicator('close')
def macd(s, nfast=12, nslow=26, nsig=9, percent=True):
fast, slow = ema(s, nfast), ema(s, nslow)
if percent:
macd = 100 * (fast / slow - 1)
else:
macd = fast - slow
sig = ema(macd, nsig)
hist = macd - sig
return DataFrame(dict(macd=macd, signal=sig, hist=hist,
fast=fast, slow=slow))
def aroon(s, n=25):
up = 100 * pd.rolling_apply(s.high, n + 1, lambda x: x.argmax()) / n
dn = 100 * pd.rolling_apply(s.low, n + 1, lambda x: x.argmin()) / n
return DataFrame(dict(up=up, down=dn))
@series_indicator('close')
def rsi(s, n=14):
diff = s.diff()
which_dn = diff < 0
up, dn = diff, diff * 0
up[which_dn], dn[which_dn] = 0, -up[which_dn]
emaup = ema(up, n, wilder=True)
emadn = ema(dn, n, wilder=True)
return 100 * emaup / (emaup + emadn)
def stoch(s, nfastk=14, nfullk=3, nfulld=3):
if not isinstance(s, DataFrame):
s = DataFrame(dict(high=s, low=s, close=s))
hmax, lmin = hhv(s, nfastk), llv(s, nfastk)
fastk = 100 * (s.close - lmin) / (hmax - lmin)
fullk = pd.rolling_mean(fastk, nfullk)
fulld = pd.rolling_mean(fullk, nfulld)
return DataFrame(dict(fastk=fastk, fullk=fullk, fulld=fulld))
@series_indicator('close')
def dtosc(s, nrsi=13, nfastk=8, nfullk=5, nfulld=3):
srsi = stoch(rsi(s, nrsi), nfastk, nfullk, nfulld)
return DataFrame(dict(fast=srsi.fullk, slow=srsi.fulld))
def atr(s, n=14):
cs = s.close.shift(1)
tr = s.high.combine(cs, max) - s.low.combine(cs, min)
return ema(tr, n, wilder=True)
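# Note: s.high.combine(cs, max) - s.low.combine(cs, min) equals
# max(high - low, |high - prev_close|, |low - prev_close|), i.e. Wilder's true
# range, so the combine() form above is just a compact way of writing it.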
def cci(s, n=20, c=0.015):
if isinstance(s, DataFrame):
s = s[['high', 'low', 'close']].mean(axis=1)
mavg = pd.rolling_mean(s, n)
mdev = pd.rolling_apply(s, n, lambda x: np.fabs(x - x.mean()).mean())
return (s - mavg) / (c * mdev)
def cmf(s, n=20):
clv = (2 * s.close - s.high - s.low) / (s.high - s.low)
vol = s.volume
return pd.rolling_sum(clv * vol, n) / pd.rolling_sum(vol, n)
def force(s, n=2):
return ema(s.close.diff() * s.volume, n)
@series_indicator('close')
def kst(s, r1=10, r2=15, r3=20, r4=30, n1=10, n2=10, n3=10, n4=15, nsig=9):
rocma1 = pd.rolling_mean(s / s.shift(r1) - 1, n1)
rocma2 = pd.rolling_mean(s / s.shift(r2) - 1, n2)
rocma3 = pd.rolling_mean(s / s.shift(r3) - 1, n3)
rocma4 = pd.rolling_mean(s / s.shift(r4) - 1, n4)
kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)
sig = pd.rolling_mean(kst, nsig)
return DataFrame(dict(kst=kst, signal=sig))
def ichimoku(s, n1=9, n2=26, n3=52):
conv = (hhv(s, n1) + llv(s, n1)) / 2
base = (hhv(s, n2) + llv(s, n2)) / 2
spana = (conv + base) / 2
spanb = (hhv(s, n3) + llv(s, n3)) / 2
return DataFrame(dict(conv=conv, base=base, spana=spana.shift(n2),
spanb=spanb.shift(n2), lspan=s.close.shift(-n2)))
def ultimate(s, n1=7, n2=14, n3=28):
cs = s.close.shift(1)
bp = s.close - s.low.combine(cs, min)
tr = s.high.combine(cs, max) - s.low.combine(cs, min)
    avg1 = pd.rolling_sum(bp, n1)
# Boston housing demo
import superimport
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "../figures"
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
import pandas as pd
import sklearn.datasets
import sklearn.linear_model as lm
from sklearn.model_selection import train_test_split
# Prevent numpy from printing too many digits
np.set_printoptions(precision=3)
# Load data (creates numpy arrays)
boston = sklearn.datasets.load_boston()
X = boston.data
y = boston.target
# Convert to Pandas format
df = pd.DataFrame(X)
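# A minimal sketch of how the demo might continue from here (assumed, not taken
# from the original script), using the modules already imported above:
df.columns = boston.feature_names
df['MEDV'] = y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
linreg = lm.LinearRegression()
linreg.fit(X_train, y_train)
print(linreg.score(X_test, y_test))  # R^2 on the held-out split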
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
                         rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
# This code extract the features from the raw joined dataset (data.csv)
# and save it in the LibSVM format.
# Usage: python construct_features.py
import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
df = pd.read_csv("data.csv", low_memory=False)
# NPU
NPU = df.NPU.copy()
NPU[NPU == ' '] = np.nan
NPU = pd.get_dummies(NPU, prefix="NPU")
# SiteZip
SiteZip = df.SiteZip.copy()
SiteZip = SiteZip.str.replace(',','')
SiteZip = SiteZip.str.replace(r'\.00', '')
SiteZip = SiteZip.replace('0',np.nan)
SiteZip = pd.get_dummies(SiteZip, prefix="SiteZip")
# Submarket1
Submarket1 = df.Submarket1.copy()
Submarket1 = pd.get_dummies(Submarket1, prefix="Submarket1")
# TAX_DISTR
TAX_DISTR = df.TAX_DISTR.copy()
TAX_DISTR[TAX_DISTR == ' '] = np.nan
TAX_DISTR = pd.get_dummies(TAX_DISTR, prefix="TAX_DISTR")
# NBHD
NBHD = df.NBHD.copy()
NBHD[NBHD == ' '] = np.nan
NBHD = pd.get_dummies(NBHD, prefix="NBHD")
# ZONING_NUM
ZONING_NUM = df.ZONING_NUM.copy()
ZONING_NUM[ZONING_NUM == ' '] = np.nan
ZONING_NUM = pd.get_dummies(ZONING_NUM, prefix="ZONING_NUM")
# building_c
building_c = df.building_c.copy()
building_c[building_c == ' '] = np.nan
building_c = pd.get_dummies(building_c, prefix="building_c")
# PROP_CLASS
PROP_CLASS = df.PROP_CLASS.copy()
PROP_CLASS[PROP_CLASS == ' '] = np.nan
PROP_CLASS = pd.get_dummies(PROP_CLASS, prefix="PROP_CLASS")
# Existing_p
Existing_p = df.Existing_p.copy()
Existing_p[Existing_p == ' '] = np.nan
Existing_p = pd.get_dummies(Existing_p, prefix="Existing_p")
# PropertyTy
PropertyTy = df.PropertyTy.copy()
PropertyTy = pd.get_dummies(PropertyTy, prefix="PropertyTy")
# secondaryT
secondaryT = df.secondaryT.copy()
secondaryT[secondaryT == ' '] = np.nan
secondaryT = pd.get_dummies(secondaryT, prefix="secondaryT")
# LUC
LUC = df.LUC.copy()
LUC[LUC == ' '] = np.nan
LUC = pd.get_dummies(LUC, prefix="LUC")
# Taxes_Per_
Taxes_Per_ = df.Taxes_Per_.copy()
Taxes_Per_zero = (Taxes_Per_ == "0").apply(int)
Taxes_Per_zero.name = 'Taxes_Per_zero'
Taxes_Per_ = Taxes_Per_.str.replace(',','').astype(float)
Taxes_Per_ = np.log1p(Taxes_Per_)
Taxes_Per_ = Taxes_Per_ / Taxes_Per_.max()
Taxes_Per_ = pd.concat([Taxes_Per_, Taxes_Per_zero], axis=1)
# Taxes_Tota
Taxes_Tota = df.Taxes_Tota.copy()
Taxes_Tota_zero = (Taxes_Tota == "0").apply(int)
Taxes_Tota_zero.name = 'Taxes_Tota_zero'
Taxes_Tota = Taxes_Tota.str.replace(',','').astype(float)
Taxes_Tota = np.log1p(Taxes_Tota)
Taxes_Tota = Taxes_Tota / Taxes_Tota.max()
Taxes_Tota = pd.concat([Taxes_Tota, Taxes_Tota_zero], axis=1)
# TOT_APPR
TOT_APPR = df.TOT_APPR.copy()
TOT_APPR_zero = (TOT_APPR == "0").apply(int)
TOT_APPR_zero.name = 'TOT_APPR_zero'
TOT_APPR = TOT_APPR.str.replace(',','').astype(float)
TOT_APPR = np.log1p(TOT_APPR)
TOT_APPR = TOT_APPR / TOT_APPR.max()
TOT_APPR = pd.concat([TOT_APPR, TOT_APPR_zero], axis=1)
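# The blocks above repeat one pattern: flag literal "0" values, strip commas,
# log-transform, scale to [0, 1], then concat the flag back on. A helper that
# factors this out (an illustrative sketch only; the original script keeps the
# blocks inline) could look like:
#
#   def log_scale_with_zero_flag(s, name):
#       zero = (s == "0").apply(int).rename(name + '_zero')
#       vals = np.log1p(s.str.replace(',', '').astype(float))
#       vals = vals / vals.max()
#       return pd.concat([vals, zero], axis=1)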
# VAL_ACRES
VAL_ACRES = df.VAL_ACRES.copy()
VAL_ACRES_zero = (VAL_ACRES == 0).apply(int)
VAL_ACRES_zero.name = 'VAL_ACRES_zero'
VAL_ACRES = np.log1p(VAL_ACRES)
VAL_ACRES = VAL_ACRES / VAL_ACRES.max()
VAL_ACRES = pd.concat([VAL_ACRES, VAL_ACRES_zero], axis=1)
# For_Sale_P
For_Sale_P = df.For_Sale_P.copy()
For_Sale_P_notNA = (For_Sale_P != " ").apply(int)
For_Sale_P_notNA.name = 'For_Sale_P_notNA'
For_Sale_P[For_Sale_P == ' '] = 0
For_Sale_P = For_Sale_P.astype(float)
For_Sale_P = np.log1p(For_Sale_P)
For_Sale_P = For_Sale_P / For_Sale_P.max()
For_Sale_P = pd.concat([For_Sale_P, For_Sale_P_notNA], axis=1)
# Last_Sale1
Last_Sale1 = df.Last_Sale1.copy()
Last_Sale1_zero = (Last_Sale1 == "0").apply(int)
Last_Sale1_zero.name = "Last_Sale1_zero"
Last_Sale1 = Last_Sale1.str.replace(',','').astype(float)
Last_Sale1 = np.log1p(Last_Sale1)
Last_Sale1 = (Last_Sale1 - Last_Sale1.min()) / (Last_Sale1.max() - Last_Sale1.min())
Last_Sale1 = pd.concat([Last_Sale1, Last_Sale1_zero], axis=1)
# yearbuilt
yearbuilt = df.yearbuilt.copy()
yearbuilt_zero = (yearbuilt == "0").apply(int)
yearbuilt_zero.name = "yearbuilt_zero"
yearbuilt[yearbuilt == "0"] = np.nan
yearbuilt = yearbuilt.str.replace(',','').astype(float)
yearbuilt = (yearbuilt - yearbuilt.min()) / (yearbuilt.max() - yearbuilt.min())
yearbuilt = yearbuilt.fillna(0)
yearbuilt = pd.concat([yearbuilt, yearbuilt_zero], axis=1)
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
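# The three helpers above differ only in the `direction` argument passed to
# vbt.Portfolio.from_orders; by default they simulate the 5-bar `price` series
# with the per-bar `order_size` array defined above, e.g.:
#
#   pf = from_orders_both()                      # long and short allowed
#   pf = from_orders_longonly(size=order_size_one)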
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
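    # min_size acts as a lower bound on order size: orders below it should be dropped
    # entirely, which is why the third column (min_size=2 vs. size=1) is expected to
    # produce no order records at all.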
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
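    # max_size caps the size of each individual order: with max_size=0.5 every order of
    # size 1 should be filled only halfway, while np.inf leaves the sizes untouched.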
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
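    # lock_cash appears to restrict order sizing to the free (non-debt) cash of the
    # group: with lock_cash=True free cash is expected to stay non-negative, while
    # lock_cash=False lets short sales drive it below zero (see the assertions below).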
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
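    # allow_partial=False should reject any order that cannot be filled in full, so
    # oversized requests simply disappear from the records instead of being filled
    # partially as in the allow_partial=True columns.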
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
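    # raise_reject=True should raise an exception for any rejected order instead of
    # skipping it silently; successfully (even partially) filled orders are unaffected.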
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
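    # log=True should additionally populate log_records with one detailed record per
    # processed row, capturing cash, position, order request, and resulting status.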
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
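    # call_seq controls the order in which columns of a cash-sharing group are processed
    # per row: 'default', 'reversed', 'random' (seeded), and 'auto', which appears to
    # sort calls so that sell orders free up cash before buy orders execute.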
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
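    # update_value=True should re-estimate the group value after every executed order
    # within the same row; this only matters under cash sharing, so for a single column
    # the records are expected to be identical either way.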
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
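    # With call_seq='auto' and target sizing, rebalancing is expected to hit the
    # requested holdings exactly, so the resulting asset values should equal
    # target_hold_value for both 'targetvalue' and 'targetpercent'.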
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
# Helper wrappers around Portfolio.from_signals. The first three pass a single
# entries/exits pair together with an explicit direction; the from_ls_* variants
# instead supply the signals positionally (presumably as entries, exits,
# short_entries, short_exits) to emulate the same three directions.
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
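    # A custom numba-compiled signal_func_nb can replace the entry/exit arrays; here it
    # maps two broadcast integer arrays to the four signal flags (long entry, long exit,
    # short entry, short exit) and should reproduce pf_base built from explicit signals.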
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
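    # val_price is the asset price used for valuation when sizing by value: np.inf is
    # expected to behave like the current order price and -np.inf like the previous
    # price, with ffill_val_price controlling whether NaN prices are forward-filled first.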
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
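    # accumulate controls whether repeated signals add to or reduce an open position:
    # 'disabled' ignores them, 'addonly' only scales in, 'removeonly' only scales out,
    # and 'both' allows either direction.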
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
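    # upon_long_conflict resolves bars where a long entry and a long exit occur together:
    # 'ignore' drops both, 'entry'/'exit' keep one side, and 'adjacent'/'opposite' appear
    # to pick the signal based on the direction of the current position.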
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
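    # upon_opposite_entry defines what an entry in the opposite direction does to an open
    # position: 'ignore' keeps it, 'close'/'closereduce' exit it first, and
    # 'reverse'/'reversereduce' flip it in a single order (hence the size-2 orders below).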
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_sl_stop(self):
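# Falling close: the long SL triggers while the short side is never stopped out;
# rising close below: the reverse. The OHLC variants check that stops are
# evaluated against intrabar open/high/low rather than the close only.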
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1, sl_trail=True)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(tp_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_stop_entry_price(self):
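# stop_entry_price selects the reference price from which the stop distance is
# measured: the valuation price, the order price, the actual fill price
# (including slippage), or the bar close.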
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
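# stop_exit_price selects the fill price of a triggered stop: the exact stop
# level ('stoplimit'), the stop level with slippage applied ('stopmarket'), or
# the bar close / user-defined order price (both with slippage).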
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
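# Sets the SL stop to 0 (so any adverse price triggers it) once `dur` bars have
# passed since the position was opened; until then the infinite stop never fires.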
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
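# Same as above, but the duration is measured from c.curr_i instead of c.init_i.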
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.curr_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 10.0, 10.0, 0.0, 0), (1, 0, 4, 10.0, 10.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_tp_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
@njit
def adjust_tp_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=np.inf, adjust_tp_func_nb=adjust_tp_func_nb, adjust_tp_args=(2,)).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_max_orders(self):
_ = from_signals_both(close=price_wide)
_ = from_signals_both(close=price_wide, max_orders=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_both(close=price_wide, log=True)
_ = from_signals_both(close=price_wide, log=True, max_logs=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
class TestFromRandomSignals:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='randnx_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples(
[(0.25, 0.25), (0.5, 0.5)],
names=['rprobnx_entry_prob', 'rprobnx_exit_prob'])
)
# ############# from_order_func ############# #
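# Helper order functions shared by TestFromOrderFunc: buy `size` on even bars and
# sell it on odd bars. The flexible variants pick the column of the current group
# via `c.call_idx`, and the log variants additionally enable order logging.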
@njit
def order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size)
@njit
def log_order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
@njit
def flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size)
return -1, nb.order_nothing_nb()
@njit
def log_flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
return -1, nb.order_nothing_nb()
class TestFromOrderFunc:
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_one_column(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price.tolist(), order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price, order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
@pytest.mark.parametrize("test_use_numba", [False, True])
def test_multiple_columns(self, test_row_wise, test_flexible, test_use_numba):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, vbt.Rep('size'), broadcast_named_args=dict(size=[0, 1, np.inf]),
row_wise=test_row_wise, flexible=test_flexible, use_numba=test_use_numba)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 2, 0, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 2, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 1.0, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 1, 4, 1.0, 5.0, 0.0, 0), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 1, 1.0, 2.0, 0.0, 1),
(2, 1, 2, 1.0, 3.0, 0.0, 0), (3, 1, 3, 1.0, 4.0, 0.0, 1),
(4, 1, 4, 1.0, 5.0, 0.0, 0), (5, 2, 0, 100.0, 1.0, 0.0, 0),
(6, 2, 1, 200.0, 2.0, 0.0, 1), (7, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 3, 66.66666666666669, 4.0, 0.0, 1), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_group_by(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 0, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 2, 1, 200.0, 2.0, 0.0, 1),
(6, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(10, 1, 3, 66.66666666666669, 4.0, 0.0, 1), (11, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(12, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (13, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 0, 1, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (5, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 0, 3, 66.66666666666669, 4.0, 0.0, 1), (7, 1, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (9, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(10, 2, 0, 100.0, 1.0, 0.0, 0), (11, 2, 1, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_cash_sharing(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
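# pre_segment_func_nb values the group at the current close and sorts the call
# sequence by target order value (emulating call_seq='auto', which from_order_func
# rejects above); pct_order_func_nb then places the target-value order for the
# column selected by that sequence.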
@njit
def pre_segment_func_nb(c, target_hold_value):
order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
order_size_type = np.full(c.group_len, SizeType.TargetValue)
direction = np.full(c.group_len, Direction.Both)
order_value_out = np.empty(c.group_len, dtype=np.float_)
c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(c, order_size, order_size_type, direction):
col_i = c.call_seq_now[c.call_idx]
return nb.order_nb(
order_size[col_i],
c.close[c.i, col_i],
size_type=order_size_type[col_i],
direction=direction[col_i]
)
pf = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_value(self, test_row_wise, test_flexible):
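# Without a pre-segment function, target-value sizing is valued at the current
# close; with target_val_pre_segment_func_nb, the valuation price is taken from
# the lagged close passed via pre_segment_args.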
@njit
def target_val_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_val_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(50., nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetValue)
return -1, nb.order_nothing_nb()
else:
@njit
def target_val_order_func_nb(c):
return nb.order_nb(50., nb.get_elem_nb(c, c.close), size_type=SizeType.TargetValue)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
pre_segment_func_nb=target_val_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_percent(self, test_row_wise, test_flexible):
@njit
def target_pct_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_pct_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(0.5, nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetPercent)
return -1, nb.order_nothing_nb()
else:
@njit
def target_pct_order_func_nb(c):
return nb.order_nb(0.5, nb.get_elem_nb(c, c.close), size_type=SizeType.TargetPercent)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
pre_segment_func_nb=target_pct_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_update_value(self, test_row_wise, test_flexible):
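# Records the group value right before and right after each order; with
# update_value=False both snapshots are identical, with update_value=True the
# post-order snapshot reflects the fill.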
if test_flexible:
@njit
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
@njit
def order_func_nb(c):
return nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
@njit
def post_order_func_nb(c, value_before, value_now):
value_before[c.i, c.col] = c.value_before
value_now[c.i, c.col] = c.value_now
value_before = np.empty_like(price.values[:, None])
value_now = np.empty_like(price.values[:, None])
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=False,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
value_now
)
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=True,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
np.array([
[100.0],
[97.04930889128518],
[185.46988117104038],
[82.47853456223025],
[104.65775576218027]
])
)
np.testing.assert_array_equal(
value_now,
np.array([
[98.01980198019803],
[187.36243097890815],
[83.30331990785257],
[105.72569204546781],
[73.54075125567473]
])
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_states(self, test_row_wise, test_flexible):
close = np.array([
[1, 1, 1],
[np.nan, 2, 2],
[3, np.nan, 3],
[4, 4, np.nan],
[5, 5, 5]
])
size = np.array([
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1]
])
value_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
value_arr2 = np.empty(size.shape, dtype=np.float_)
value_arr3 = np.empty(size.shape, dtype=np.float_)
return_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr2 = np.empty(size.shape, dtype=np.float_)
return_arr3 = np.empty(size.shape, dtype=np.float_)
pos_record_arr1 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr2 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr3 = np.empty(size.shape, dtype=trade_dt)
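# Capture value, return and position-record state at three stages of the
# simulation: pre-segment, inside the order function, and post-order.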
def pre_segment_func_nb(c):
value_arr1[c.i, c.group] = c.last_value[c.group]
return_arr1[c.i, c.group] = c.last_return[c.group]
for col in range(c.from_col, c.to_col):
pos_record_arr1[c.i, col] = c.last_pos_record[col]
if c.i > 0:
c.last_val_price[c.from_col:c.to_col] = c.last_val_price[c.from_col:c.to_col] + 0.5
return ()
if test_flexible:
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
value_arr2[c.i, col] = c.last_value[c.group]
return_arr2[c.i, col] = c.last_return[c.group]
pos_record_arr2[c.i, col] = c.last_pos_record[col]
return col, nb.order_nb(size[c.i, col], fixed_fees=1.)
return -1, nb.order_nothing_nb()
else:
def order_func_nb(c):
value_arr2[c.i, c.col] = c.value_now
return_arr2[c.i, c.col] = c.return_now
pos_record_arr2[c.i, c.col] = c.pos_record_now
return nb.order_nb(size[c.i, c.col], fixed_fees=1.)
def post_order_func_nb(c):
value_arr3[c.i, c.col] = c.value_now
return_arr3[c.i, c.col] = c.return_now
pos_record_arr3[c.i, c.col] = c.pos_record_now
_ = vbt.Portfolio.from_order_func(
close,
order_func_nb,
pre_segment_func_nb=pre_segment_func_nb,
post_order_func_nb=post_order_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
value_arr1,
np.array([
[100.0, 100.0],
[98.0, 99.0],
[98.5, 99.0],
[99.0, 98.0],
[99.0, 98.5]
])
)
np.testing.assert_array_equal(
value_arr2,
np.array([
[100.0, 99.0, 100.0],
[99.0, 99.0, 99.5],
[99.0, 99.0, 99.0],
[100.0, 100.0, 98.5],
[99.0, 98.5, 99.0]
])
)
np.testing.assert_array_equal(
value_arr3,
np.array([
[99.0, 98.0, 99.0],
[99.0, 98.5, 99.0],
[99.0, 99.0, 98.0],
[100.0, 99.0, 98.5],
[98.5, 97.0, 99.0]
])
)
np.testing.assert_array_equal(
return_arr1,
np.array([
[np.nan, np.nan],
[-0.02, -0.01],
[0.00510204081632653, 0.0],
[0.005076142131979695, -0.010101010101010102],
[0.0, 0.00510204081632653]
])
)
np.testing.assert_array_equal(
return_arr2,
np.array([
[0.0, -0.01, 0.0],
[-0.01, -0.01, -0.005],
[0.01020408163265306, 0.01020408163265306, 0.0],
[0.015228426395939087, 0.015228426395939087, -0.005050505050505051],
[0.0, -0.005050505050505051, 0.01020408163265306]
])
)
np.testing.assert_array_equal(
return_arr3,
np.array([
[-0.01, -0.02, -0.01],
[-0.01, -0.015, -0.01],
[0.01020408163265306, 0.01020408163265306, -0.010101010101010102],
[0.015228426395939087, 0.005076142131979695, -0.005050505050505051],
[-0.005050505050505051, -0.020202020202020204, 0.01020408163265306]
])
)
record_arrays_close(
pos_record_arr1.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr2.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 1.0, 0.25, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.5, 0.375, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.5, -0.375, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr3.flatten(),
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 3.0, 0, 3.0, 3.0, -1, 4.0, 1.0, 1.0, 0.1111111111111111, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, 4, 5.0, 1.0, -3.0, -0.75, 1, 1, 1),
(1, 2, 2.0, 2, 4.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
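# Second pass: capture per-order and post-segment state and compare it against
# the Portfolio accessors (cash, assets, filled close, value, returns), including
# the in-simulation-order variants for the non-flexible mode.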
cash_arr = np.empty((size.shape[0], 2), dtype=np.float_)
position_arr = np.empty(size.shape, dtype=np.float_)
val_price_arr = np.empty(size.shape, dtype=np.float_)
value_arr = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr = np.empty((size.shape[0], 2), dtype=np.float_)
sim_order_cash_arr = np.empty(size.shape, dtype=np.float_)
sim_order_value_arr = np.empty(size.shape, dtype=np.float_)
sim_order_return_arr = np.empty(size.shape, dtype=np.float_)
def post_order_func_nb(c):
sim_order_cash_arr[c.i, c.col] = c.cash_now
sim_order_value_arr[c.i, c.col] = c.value_now
sim_order_return_arr[c.i, c.col] = c.value_now
if c.i == 0 and c.call_idx == 0:
sim_order_return_arr[c.i, c.col] -= c.init_cash[c.group]
sim_order_return_arr[c.i, c.col] /= c.init_cash[c.group]
else:
if c.call_idx == 0:
prev_i = c.i - 1
prev_col = c.to_col - 1
else:
prev_i = c.i
prev_col = c.from_col + c.call_idx - 1
sim_order_return_arr[c.i, c.col] -= sim_order_value_arr[prev_i, prev_col]
sim_order_return_arr[c.i, c.col] /= sim_order_value_arr[prev_i, prev_col]
def post_segment_func_nb(c):
cash_arr[c.i, c.group] = c.last_cash[c.group]
for col in range(c.from_col, c.to_col):
position_arr[c.i, col] = c.last_position[col]
val_price_arr[c.i, col] = c.last_val_price[col]
value_arr[c.i, c.group] = c.last_value[c.group]
return_arr[c.i, c.group] = c.last_return[c.group]
pf = vbt.Portfolio.from_order_func(
close,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_segment_func_nb=post_segment_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
cash_arr,
pf.cash().values
)
np.testing.assert_array_equal(
position_arr,
pf.assets().values
)
np.testing.assert_array_equal(
val_price_arr,
pf.get_filled_close().values
)
np.testing.assert_array_equal(
value_arr,
pf.value().values
)
np.testing.assert_array_equal(
return_arr,
pf.returns().values
)
if test_flexible:
with pytest.raises(Exception):
pf.cash(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.value(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.returns(in_sim_order=True, group_by=False)
else:
np.testing.assert_array_equal(
sim_order_cash_arr,
pf.cash(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_value_arr,
pf.value(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_return_arr,
pf.returns(in_sim_order=True, group_by=False).values
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_post_sim_ctx(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
1.,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
return -1, nb.order_nothing_nb()
else:
def order_func(c):
return nb.order_nb(
1.,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
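# post_sim_func stores a deep copy of the final simulation context so that its
# contents can be inspected after the run.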
def post_sim_func(c, lst):
lst.append(deepcopy(c))
lst = []
_ = vbt.Portfolio.from_order_func(
price_wide,
order_func,
post_sim_func_nb=post_sim_func,
post_sim_args=(lst,),
row_wise=test_row_wise,
update_value=True,
max_logs=price_wide.shape[0] * price_wide.shape[1],
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
c = lst[-1]
assert c.target_shape == price_wide.shape
np.testing.assert_array_equal(
c.close,
price_wide.values
)
np.testing.assert_array_equal(
c.group_lens,
np.array([2, 1])
)
np.testing.assert_array_equal(
c.init_cash,
np.array([100., 100.])
)
assert c.cash_sharing
if test_flexible:
assert c.call_seq is None
else:
np.testing.assert_array_equal(
c.call_seq,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
np.testing.assert_array_equal(
c.segment_mask,
np.array([
[True, True],
[True, True],
[True, True],
[True, True],
[True, True]
])
)
assert c.ffill_val_price
assert c.update_value
if test_row_wise:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 2, 0, 1.0, 1.01, 1.0101, 0), (3, 0, 1, 1.0, 2.02, 1.0202, 0),
(4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 2, 1, 1.0, 2.02, 1.0202, 0),
(6, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (7, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 0, 3, 1.0, 4.04, 1.0404, 0),
(10, 1, 3, 1.0, 4.04, 1.0404, 0), (11, 2, 3, 1.0, 4.04, 1.0404, 0),
(12, 0, 4, 1.0, 5.05, 1.0505, 0), (13, 1, 4, 1.0, 5.05, 1.0505, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 0, 1, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
(4, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (5, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(6, 0, 3, 1.0, 4.04, 1.0404, 0), (7, 1, 3, 1.0, 4.04, 1.0404, 0),
(8, 0, 4, 1.0, 5.05, 1.0505, 0), (9, 1, 4, 1.0, 5.05, 1.0505, 0),
(10, 2, 0, 1.0, 1.01, 1.0101, 0), (11, 2, 1, 1.0, 2.02, 1.0202, 0),
(12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 2, 3, 1.0, 4.04, 1.0404, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
if test_row_wise:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
(3, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
(5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
(6, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 6),
(7, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
(8, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 8),
(9, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
1.0, 4.04, 1.0404, 0, 0, -1, 9),
(10, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
(11, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
(12, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
(13, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
else:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
(3, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 4),
(5, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
(6, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
(7, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
(8, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
(9, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
(10, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
(11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
(12, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 12),
(13, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
np.testing.assert_array_equal(
c.last_cash,
np.array([59.39700000000002, 79.69850000000001])
)
np.testing.assert_array_equal(
c.last_position,
np.array([5., 5., 5.])
)
np.testing.assert_array_equal(
c.last_val_price,
np.array([5.0, 5.0, 5.0])
)
np.testing.assert_array_equal(
c.last_value,
np.array([109.39700000000002, 104.69850000000001])
)
np.testing.assert_array_equal(
c.second_last_value,
np.array([103.59800000000001, 101.799])
)
np.testing.assert_array_equal(
c.last_return,
np.array([0.05597598409235705, 0.028482598060884715])
)
np.testing.assert_array_equal(
c.last_debt,
np.array([0., 0., 0.])
)
np.testing.assert_array_equal(
c.last_free_cash,
np.array([59.39700000000002, 79.69850000000001])
)
if test_row_wise:
np.testing.assert_array_equal(
c.last_oidx,
np.array([12, 13, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([12, 13, 14])
)
else:
np.testing.assert_array_equal(
c.last_oidx,
np.array([8, 9, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([8, 9, 14])
)
assert c.order_records[c.last_oidx[0]]['col'] == 0
assert c.order_records[c.last_oidx[1]]['col'] == 1
assert c.order_records[c.last_oidx[2]]['col'] == 2
assert c.log_records[c.last_lidx[0]]['col'] == 0
assert c.log_records[c.last_lidx[1]]['col'] == 1
assert c.log_records[c.last_lidx[2]]['col'] == 2
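    # Debt and free cash captured in post_order_func_nb should match pf.cash(free=True),
    # both per column and per group when cash is shared.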
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_free_cash(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c, size):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
size[c.i, col],
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
def order_func(c, size):
return nb.order_nb(
size[c.i, c.col],
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
def post_order_func(c, debt, free_cash):
debt[c.i, c.col] = c.debt_now
if c.cash_sharing:
free_cash[c.i, c.group] = c.free_cash_now
else:
free_cash[c.i, c.col] = c.free_cash_now
size = np.array([
[5, -5, 5],
[5, -5, -10],
[-5, 5, 10],
[-5, 5, -10],
[-5, 5, 10]
])
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[93.8995, 94.0005, 93.8995],
[82.6985, 83.00150000000001, 92.70150000000001],
[96.39999999999999, 81.55000000000001, 80.8985],
[115.002, 74.998, 79.5025],
[89.0045, 48.49550000000001, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 24.75, 0.0],
[0.0, 44.55, 19.8],
[0.0, 22.275, 0.0],
[0.0, 0.0, 9.9],
[4.95, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[73.4975, 74.0025, 73.4975],
[52.0955, 53.00449999999999, 72.1015],
[65.797, 81.25299999999999, 80.0985],
[74.598, 114.60199999999998, 78.9005],
[68.5985, 108.50149999999998, 87.49949999999998]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[87.9, 93.8995],
[65.70000000000002, 92.70150000000001],
[77.95000000000002, 80.8985],
[90.00000000000001, 79.5025],
[37.500000000000014, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
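    # Initial cash can be given per column or inferred via InitCashMode.Auto/AutoAlign;
    # the auto modes should produce the same orders as an infinite starting balance.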
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_init_cash(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=[1., 10., np.inf], flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 1.0, 0.0, 0),
(2, 2, 0, 10.0, 1.0, 0.0, 0), (3, 0, 1, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 2, 1, 10.0, 2.0, 0.0, 1),
(6, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 0, 3, 10.0, 4.0, 0.0, 1),
(10, 1, 3, 10.0, 4.0, 0.0, 1), (11, 2, 3, 10.0, 4.0, 0.0, 1),
(12, 0, 4, 8.0, 5.0, 0.0, 0), (13, 1, 4, 8.0, 5.0, 0.0, 0),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 2.0, 0.0, 1),
(2, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (3, 0, 3, 10.0, 4.0, 0.0, 1),
(4, 0, 4, 8.0, 5.0, 0.0, 0), (5, 1, 0, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 1, 3, 10.0, 4.0, 0.0, 1), (9, 1, 4, 8.0, 5.0, 0.0, 0),
(10, 2, 0, 10.0, 1.0, 0.0, 0), (11, 2, 1, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 2, 3, 10.0, 4.0, 0.0, 1),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(pf._init_cash) == np.ndarray
base_pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=np.inf, flexible=test_flexible)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.Auto, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.Auto
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.AutoAlign, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.AutoAlign
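    # Verify the number and ordering of all callback invocations (sim, group, segment,
    # order, post-order) in column-major mode, with and without a segment mask and with
    # call_pre_segment/call_post_segment toggled.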
def test_func_calls(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 56
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [56]
assert list(pre_group_lst) == [2, 34]
assert list(post_group_lst) == [33, 55]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 35, 39, 43, 47, 51]
assert list(post_segment_lst) == [8, 14, 20, 26, 32, 38, 42, 46, 50, 54]
assert list(order_lst) == [4, 6, 10, 12, 16, 18, 22, 24, 28, 30, 36, 40, 44, 48, 52]
assert list(post_order_lst) == [5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 37, 41, 45, 49, 53]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 38
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [38]
assert list(pre_group_lst) == [2, 22]
assert list(post_group_lst) == [21, 37]
assert list(pre_segment_lst) == [3, 5, 7, 13, 19, 23, 25, 29, 31, 35]
assert list(post_segment_lst) == [4, 6, 12, 18, 20, 24, 28, 30, 34, 36]
assert list(order_lst) == [8, 10, 14, 16, 26, 32]
assert list(post_order_lst) == [9, 11, 15, 17, 27, 33]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 26
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [26]
assert list(pre_group_lst) == [2, 16]
assert list(post_group_lst) == [15, 25]
assert list(pre_segment_lst) == [3, 9, 17, 21]
assert list(post_segment_lst) == [8, 14, 20, 24]
assert list(order_lst) == [4, 6, 10, 12, 18, 22]
assert list(post_order_lst) == [5, 7, 11, 13, 19, 23]
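    # Same callback accounting as above, but with a flexible order function, which is
    # invoked once more per segment to signal that no more orders follow.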
def test_func_calls_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 66
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [66]
assert list(pre_group_lst) == [2, 39]
assert list(post_group_lst) == [38, 65]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 40, 45, 50, 55, 60]
assert list(post_segment_lst) == [9, 16, 23, 30, 37, 44, 49, 54, 59, 64]
assert list(order_lst) == [
4, 6, 8, 11, 13, 15, 18, 20, 22, 25, 27, 29, 32, 34,
36, 41, 43, 46, 48, 51, 53, 56, 58, 61, 63
]
assert list(post_order_lst) == [5, 7, 12, 14, 19, 21, 26, 28, 33, 35, 42, 47, 52, 57, 62]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 42
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [42]
assert list(pre_group_lst) == [2, 24]
assert list(post_group_lst) == [23, 41]
assert list(pre_segment_lst) == [3, 5, 7, 14, 21, 25, 27, 32, 34, 39]
assert list(post_segment_lst) == [4, 6, 13, 20, 22, 26, 31, 33, 38, 40]
assert list(order_lst) == [8, 10, 12, 15, 17, 19, 28, 30, 35, 37]
assert list(post_order_lst) == [9, 11, 16, 18, 29, 36]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 30
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [30]
assert list(pre_group_lst) == [2, 18]
assert list(post_group_lst) == [17, 29]
assert list(pre_segment_lst) == [3, 10, 19, 24]
assert list(post_segment_lst) == [9, 16, 23, 28]
assert list(order_lst) == [4, 6, 8, 11, 13, 15, 20, 22, 25, 27]
assert list(post_order_lst) == [5, 7, 12, 14, 21, 26]
def test_func_calls_row_wise(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst):
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst):
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst):
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst):
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst):
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst):
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst):
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 62
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [62]
assert list(pre_row_lst) == [2, 14, 26, 38, 50]
assert list(post_row_lst) == [13, 25, 37, 49, 61]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 33, 39, 45, 51, 57]
assert list(post_segment_lst) == [8, 12, 20, 24, 32, 36, 44, 48, 56, 60]
assert list(order_lst) == [4, 6, 10, 16, 18, 22, 28, 30, 34, 40, 42, 46, 52, 54, 58]
assert list(post_order_lst) == [5, 7, 11, 17, 19, 23, 29, 31, 35, 41, 43, 47, 53, 55, 59]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 44
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [44]
assert list(pre_row_lst) == [2, 8, 16, 26, 38]
assert list(post_row_lst) == [7, 15, 25, 37, 43]
assert list(pre_segment_lst) == [3, 5, 9, 11, 17, 23, 27, 33, 39, 41]
assert list(post_segment_lst) == [4, 6, 10, 14, 22, 24, 32, 36, 40, 42]
assert list(order_lst) == [12, 18, 20, 28, 30, 34]
assert list(post_order_lst) == [13, 19, 21, 29, 31, 35]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 32
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [32]
assert list(pre_row_lst) == [2, 4, 10, 18, 30]
assert list(post_row_lst) == [3, 9, 17, 29, 31]
assert list(pre_segment_lst) == [5, 11, 19, 25]
assert list(post_segment_lst) == [8, 16, 24, 28]
assert list(order_lst) == [6, 12, 14, 20, 22, 26]
assert list(post_order_lst) == [7, 13, 15, 21, 23, 27]
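    # Row-wise callback accounting with a flexible order function.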
def test_func_calls_row_wise_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 72
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [72]
assert list(pre_row_lst) == [2, 16, 30, 44, 58]
assert list(post_row_lst) == [15, 29, 43, 57, 71]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 38, 45, 52, 59, 66]
assert list(post_segment_lst) == [9, 14, 23, 28, 37, 42, 51, 56, 65, 70]
assert list(order_lst) == [
4, 6, 8, 11, 13, 18, 20, 22, 25, 27, 32, 34, 36,
39, 41, 46, 48, 50, 53, 55, 60, 62, 64, 67, 69
]
assert list(post_order_lst) == [5, 7, 12, 19, 21, 26, 33, 35, 40, 47, 49, 54, 61, 63, 68]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 48
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [48]
assert list(pre_row_lst) == [2, 8, 17, 28, 42]
assert list(post_row_lst) == [7, 16, 27, 41, 47]
assert list(pre_segment_lst) == [3, 5, 9, 11, 18, 25, 29, 36, 43, 45]
assert list(post_segment_lst) == [4, 6, 10, 15, 24, 26, 35, 40, 44, 46]
assert list(order_lst) == [12, 14, 19, 21, 23, 30, 32, 34, 37, 39]
assert list(post_order_lst) == [13, 20, 22, 31, 33, 38]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 36
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [36]
assert list(pre_row_lst) == [2, 4, 11, 20, 34]
assert list(post_row_lst) == [3, 10, 19, 33, 35]
assert list(pre_segment_lst) == [5, 12, 21, 28]
assert list(post_segment_lst) == [9, 18, 27, 32]
assert list(order_lst) == [6, 8, 13, 15, 17, 22, 24, 26, 29, 31]
assert list(post_order_lst) == [7, 14, 16, 23, 25, 30]
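    # max_orders must be large enough to hold all generated order records,
    # otherwise the simulation should raise.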
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_orders(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=14, flexible=test_flexible)
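    # Same capacity check for log records via max_logs.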
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_logs(self, test_row_wise, test_flexible):
log_order_func = log_flex_order_func_nb if test_flexible else log_order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=14, flexible=test_flexible)
# ############# Portfolio ############# #
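# Fixtures shared by TestPortfolio: close prices with NaNs and per-column directions,
# simulated without grouping, grouped without cash sharing, and grouped with cash sharing.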
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'both']
group_by = pd.Index(['first', 'first', 'second'], name='group')
pf = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # independent
pf_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # grouped
pf_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D', attach_call_seq=True
) # shared
class TestPortfolio:
def test_config(self, tmp_path):
pf2 = pf.copy()
pf2._metrics = pf2._metrics.copy()
pf2.metrics['hello'] = 'world'
pf2._subplots = pf2.subplots.copy()
pf2.subplots['hello'] = 'world'
assert vbt.Portfolio.loads(pf2['a'].dumps()) == pf2['a']
assert vbt.Portfolio.loads(pf2.dumps()) == pf2
pf2.save(tmp_path / 'pf')
assert vbt.Portfolio.load(tmp_path / 'pf') == pf2
def test_wrapper(self):
pd.testing.assert_index_equal(
pf.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
price_na.columns
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.grouper.group_by is None
assert pf.wrapper.grouper.allow_enable
assert pf.wrapper.grouper.allow_disable
assert pf.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_grouped.wrapper.columns,
price_na.columns
)
assert pf_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_grouped.wrapper.grouper.group_by,
group_by
)
assert pf_grouped.wrapper.grouper.allow_enable
assert pf_grouped.wrapper.grouper.allow_disable
assert pf_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_shared.wrapper.columns,
price_na.columns
)
assert pf_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_shared.wrapper.grouper.group_by,
group_by
)
assert not pf_shared.wrapper.grouper.allow_enable
assert pf_shared.wrapper.grouper.allow_disable
assert not pf_shared.wrapper.grouper.allow_modify
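    # Selecting a column or group should forward the selection to the wrapper, records,
    # initial cash and call sequence.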
def test_indexing(self):
assert pf['a'].wrapper == pf.wrapper['a']
assert pf['a'].orders == pf.orders['a']
assert pf['a'].logs == pf.logs['a']
assert pf['a'].init_cash == pf.init_cash['a']
pd.testing.assert_series_equal(pf['a'].call_seq, pf.call_seq['a'])
assert pf['c'].wrapper == pf.wrapper['c']
assert pf['c'].orders == pf.orders['c']
assert pf['c'].logs == pf.logs['c']
assert pf['c'].init_cash == pf.init_cash['c']
pd.testing.assert_series_equal(pf['c'].call_seq, pf.call_seq['c'])
assert pf[['c']].wrapper == pf.wrapper[['c']]
assert pf[['c']].orders == pf.orders[['c']]
assert pf[['c']].logs == pf.logs[['c']]
pd.testing.assert_series_equal(pf[['c']].init_cash, pf.init_cash[['c']])
pd.testing.assert_frame_equal(pf[['c']].call_seq, pf.call_seq[['c']])
assert pf_grouped['first'].wrapper == pf_grouped.wrapper['first']
assert pf_grouped['first'].orders == pf_grouped.orders['first']
assert pf_grouped['first'].logs == pf_grouped.logs['first']
assert pf_grouped['first'].init_cash == pf_grouped.init_cash['first']
pd.testing.assert_frame_equal(pf_grouped['first'].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped[['first']].wrapper == pf_grouped.wrapper[['first']]
assert pf_grouped[['first']].orders == pf_grouped.orders[['first']]
assert pf_grouped[['first']].logs == pf_grouped.logs[['first']]
pd.testing.assert_series_equal(
pf_grouped[['first']].init_cash,
pf_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(pf_grouped[['first']].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped['second'].wrapper == pf_grouped.wrapper['second']
assert pf_grouped['second'].orders == pf_grouped.orders['second']
assert pf_grouped['second'].logs == pf_grouped.logs['second']
assert pf_grouped['second'].init_cash == pf_grouped.init_cash['second']
pd.testing.assert_series_equal(pf_grouped['second'].call_seq, pf_grouped.call_seq['c'])
assert pf_grouped[['second']].wrapper == pf_grouped.wrapper[['second']]
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]
assert pf_grouped[['second']].logs == pf_grouped.logs[['second']]
pd.testing.assert_series_equal(
pf_grouped[['second']].init_cash,
pf_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(pf_grouped[['second']].call_seq, pf_grouped.call_seq[['c']])
assert pf_shared['first'].wrapper == pf_shared.wrapper['first']
assert pf_shared['first'].orders == pf_shared.orders['first']
assert pf_shared['first'].logs == pf_shared.logs['first']
assert pf_shared['first'].init_cash == pf_shared.init_cash['first']
pd.testing.assert_frame_equal(pf_shared['first'].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared[['first']].wrapper == pf_shared.wrapper[['first']]
assert pf_shared[['first']].orders == pf_shared.orders[['first']]
assert pf_shared[['first']].logs == pf_shared.logs[['first']]
pd.testing.assert_series_equal(
pf_shared[['first']].init_cash,
pf_shared.init_cash[['first']])
pd.testing.assert_frame_equal(pf_shared[['first']].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared['second'].wrapper == pf_shared.wrapper['second']
assert pf_shared['second'].orders == pf_shared.orders['second']
assert pf_shared['second'].logs == pf_shared.logs['second']
assert pf_shared['second'].init_cash == pf_shared.init_cash['second']
pd.testing.assert_series_equal(pf_shared['second'].call_seq, pf_shared.call_seq['c'])
assert pf_shared[['second']].wrapper == pf_shared.wrapper[['second']]
assert pf_shared[['second']].orders == pf_shared.orders[['second']]
assert pf_shared[['second']].logs == pf_shared.logs[['second']]
pd.testing.assert_series_equal(
pf_shared[['second']].init_cash,
pf_shared.init_cash[['second']])
pd.testing.assert_frame_equal(pf_shared[['second']].call_seq, pf_shared.call_seq[['c']])
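    # Regrouping to the same grouping returns an equal portfolio; ungrouping a
    # cash-shared portfolio is not allowed.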
def test_regroup(self):
assert pf.regroup(None) == pf
assert pf.regroup(False) == pf
assert pf.regroup(group_by) != pf
pd.testing.assert_index_equal(pf.regroup(group_by).wrapper.grouper.group_by, group_by)
assert pf_grouped.regroup(None) == pf_grouped
assert pf_grouped.regroup(False) != pf_grouped
assert pf_grouped.regroup(False).wrapper.grouper.group_by is None
assert pf_grouped.regroup(group_by) == pf_grouped
assert pf_shared.regroup(None) == pf_shared
with pytest.raises(Exception):
_ = pf_shared.regroup(False)
assert pf_shared.regroup(group_by) == pf_shared
def test_cash_sharing(self):
assert not pf.cash_sharing
assert not pf_grouped.cash_sharing
assert pf_shared.cash_sharing
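    # With call_seq='reversed', columns within each group are processed in reverse order.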
def test_call_seq(self):
pd.testing.assert_frame_equal(
pf.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
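    # Order records and their counts should agree whether accessed from the ungrouped,
    # grouped, or cash-shared portfolio.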
def test_orders(self):
record_arrays_close(
pf.orders.values,
np.array([
(0, 0, 1, 0.1, 2.02, 0.10202, 0), (1, 0, 2, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 0, 4, 1.0, 5.05, 0.1505, 0), (3, 1, 0, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 1, 3, 0.1, 4.04, 0.10404000000000001, 0),
(6, 1, 4, 1.0, 4.95, 0.14950000000000002, 1), (7, 2, 0, 1.0, 1.01, 0.1101, 0),
(8, 2, 1, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 2, 3, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
pf.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0,
100.0, np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 0, 0, 1, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1,
0.0, 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 0, 0, 2, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
0.0, 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 0, 0, 3, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 0, 0, 4, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0,
0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 1, 1, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 1, 1, 2, 100.97612, -1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999,
-1.0, np.nan, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 1, 1, 3, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
-0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
0.10404000000000001, 0, 0, -1, 5),
(9, 1, 1, 4, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0, 5.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 105.26858, -2.0, 6.03,
93.20857999999998, 5.0, 95.46808, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 2, 2, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01, 0.1,
0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0, 0.0, 98.8799,
1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 2, 2, 1, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001, 1.1,
0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
-1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 2, 2, 3, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 2, 2, 4, 101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.logs.count(),
result
)
def test_entry_trades(self):
record_arrays_close(
pf.entry_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0, -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 1.0, 0, 0.99, 0.10990000000000001, 4, 4.954285714285714,
0.049542857142857145, -4.12372857142857, -4.165382395382394, 1, 0, 2),
(3, 1, 0.1, 1, 1.98, 0.10198, 4, 4.954285714285714, 0.004954285714285714,
-0.4043628571428571, -2.0422366522366517, 1, 0, 2),
(4, 1, 1.0, 4, 4.95, 0.14950000000000002, 4, 4.954285714285714,
0.049542857142857145, -0.20332857142857072, -0.04107647907647893, 1, 0, 2),
(5, 2, 1.0, 0, 1.01, 0.1101, 3, 3.0599999999999996, 0.21241818181818184,
1.727481818181818, 1.71037803780378, 0, 1, 3),
(6, 2, 0.1, 1, 2.02, 0.10202, 3, 3.0599999999999996, 0.021241818181818185,
-0.019261818181818203, -0.09535553555355546, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 3, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_entry_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_entry_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([5, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_entry_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.entry_trades.count(),
result
)
def test_exit_trades(self):
record_arrays_close(
pf.exit_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_exit_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_exit_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_exit_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.exit_trades.count(),
result
)
def test_positions(self):
record_arrays_close(
pf.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0, 2),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
pf.drawdowns.values,
np.array([
(0, 0, 0, 1, 4, 4, 100.0, 99.68951, 99.68951, 0),
(1, 1, 0, 1, 4, 4, 99.8801, 95.26858, 95.26858, 0),
(2, 2, 2, 3, 3, 4, 101.71618000000001, 101.70822000000001, 101.70822000000001, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(pf.close, price_na)
pd.testing.assert_frame_equal(pf_grouped.close, price_na)
pd.testing.assert_frame_equal(pf_shared.close, price_na)
def test_get_filled_close(self):
pd.testing.assert_frame_equal(
pf.get_filled_close(),
price_na.ffill().bfill()
)
def test_asset_flow(self):
pd.testing.assert_frame_equal(
pf.asset_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_flow(),
result
)
def test_assets(self):
pd.testing.assert_frame_equal(
pf.assets(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.assets(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.assets(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.assets(),
result
)
pd.testing.assert_frame_equal(
pf_shared.assets(),
result
)
def test_position_mask(self):
pd.testing.assert_frame_equal(
pf.position_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.position_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.position_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(),
result
)
def test_position_coverage(self):
pd.testing.assert_series_equal(
pf.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
pf.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
pf.cash_flow(free=True),
pd.DataFrame(
np.array([
[0.0, -1.0998999999999999, -1.1201],
[-0.30402, -0.2999800000000002, -0.3040200000000002],
[0.19402999999999998, 0.0, 2.8402999999999996],
[0.0, -0.2920400000000002, 0.29204000000000035],
[-5.2005, -5.0995, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
pf.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
pf_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
pf.cash(free=True),
pd.DataFrame(
np.array([
[100.0, 98.9001, 98.8799],
[99.69598, 98.60011999999999, 98.57588000000001],
[99.89001, 98.60011999999999, 101.41618000000001],
[99.89001, 98.30807999999999, 101.70822000000001],
[94.68951, 93.20857999999998, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(),
result
)
def test_asset_value(self):
pd.testing.assert_frame_equal(
pf.asset_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., 2.2, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., -2.2, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[-2.2, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0.0, 0.01000999998999, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.03909759620159034, 0.0],
[0.0, 0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0.0, -0.010214494162927312, 0.010012024441354066],
[0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
[0.0, -0.022821548354919067, 0.002949383274126105],
[0.0, -0.04241418126633477, 0.0],
[0.050155728521486365, -0.12017991413866216, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.00505305454620791, 0.010012024441354066],
[0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
[0.0, -0.011201622483733716, 0.002949383274126105],
[0.0, -0.020585865497718882, 0.0],
[0.025038871596209537, -0.0545825965137659, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00505305454620791, 0.010012024441354066],
[-0.010188689433972452, 0.021830620581035857],
[-0.0112078992458765, 0.002949383274126105],
[-0.02059752492931316, 0.0],
[-0.027337628293439265, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0.0, -0.01000999998999, 0.010012024441354066],
[0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
[0.0, -0.021825370842812494, 0.002949383274126105],
[0.0, -0.03909759620159034, 0.0],
[0.050155728521486365, -0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.005002498748124688, 0.010012024441354066],
[0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
[0.0, -0.010956168751293576, 0.002949383274126105],
[0.0, -0.019771825228137207, 0.0],
[0.025038871596209537, -0.049210520540028384, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005002498748124688, 0.010012024441354066],
[-0.009965205542937988, 0.021830620581035857],
[-0.010962173376438594, 0.002949383274126105],
[-0.019782580537729116, 0.0],
[-0.0246106361476199, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, 98.77612, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, 198.77612, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[198.66613, 198.6721, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[198.66613, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[-3.0049513746473233e-05, 0.0, 9.33060570e-03],
[0.0, -0.011617682390048093, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[-3.0049513746473233e-05, 9.33060570e-03],
[-0.011617682390048093, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(),
result
)
def test_asset_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, 0.0, 0.42740909],
[0., -1.0491090909090908, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[-0.0029850000000000154, 0.42740909],
[-1.0491090909090908, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(),
result
)
def test_benchmark_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(),
result
)
def test_benchmark_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(),
result
)
def test_total_benchmark_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-59.62258787402645, -23.91718815937344]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-13.30950646054953, -19.278625117344564, 12.345065267401496]),
index=price_na.columns
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.information_ratio(group_by=False),
pd.Series(
np.array([-0.9988561334618041, -0.8809478746008806, -0.884780642352239]),
index=price_na.columns
).rename('information_ratio')
)
with pytest.raises(Exception):
_ = pf_shared.information_ratio(pf_shared.benchmark_returns(group_by=False) * 2)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]', 'Worst Trade [%]',
'Avg Winning Trade [%]', 'Avg Losing Trade [%]',
'Avg Winning Trade Duration', 'Avg Losing Trade Duration',
'Profit Factor', 'Expectancy', 'Sharpe Ratio', 'Calmar Ratio',
'Omega Ratio', 'Sortino Ratio'
], dtype='object')
pd.testing.assert_series_equal(
pf.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 98.88877000000001, -1.11123, 283.3333333333333,
2.05906183131983, 0.42223000000000005, 1.6451238489727062, pd.Timedelta('3 days 08:00:00'),
2.0, 1.3333333333333333, 0.6666666666666666, -1.5042060606060605, 33.333333333333336,
-98.38058805880588, -100.8038553855386, 143.91625412541256, -221.34645964596464,
pd.Timedelta('2 days 12:00:00'), pd.Timedelta('2 days 00:00:00'), np.inf, 0.10827272727272726,
-6.751008013903537, 10378.930331014584, 4.768700318817701, 31.599760994679134
]),
index=stats_index,
name='agg_func_mean')
)
pd.testing.assert_series_equal(
pf.stats(column='a'),
pd.Series(
np.array([
                    pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
import pandas as pd
all_genes = pd.read_csv("https://raw.githubusercontent.com/s-a-nersisyan/HSE_bioinformatics_2021/master/seminar13/all_genes.txt", header=None)[0]
df = pd.DataFrame(index=all_genes)
import warnings
warnings.simplefilter(action='ignore', category=Warning)
from IMLearn import BaseEstimator
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
# conversions from USD to all currencies
TO_USD = {'USD': 1, 'AED': 3.6725, 'AFN': 87.5007, 'ALL': 111.4952, 'AMD': 467.558, 'ANG': 1.79, 'AOA': 404.0973,
'ARS': 114.04, 'AUD': 1.3726, 'AWG': 1.79, 'AZN': 1.6979, 'BAM': 1.8108, 'BBD': 2.0, 'BDT': 85.438,
'BGN': 1.8106, 'BHD': 0.376, 'BIF': 2024.8096, 'BMD': 1.0, 'BND': 1.3669, 'BOB': 6.8653, 'BRL': 4.6806,
'BSD': 1.0, 'BTN': 76.4959, 'BWP': 11.973, 'BYN': 2.8555, 'BZD': 2.0, 'CAD': 1.2669, 'CDF': 1999.8809,
'CHF': 0.9563, 'CLP': 822.3729, 'CNY': 6.5225, 'COP': 3728.9547, 'CRC': 657.6806, 'CUP': 24.0,
'CVE': 102.0895, 'CZK': 22.4939, 'DJF': 177.721, 'DKK': 6.9072, 'DOP': 54.912, 'DZD': 143.832,
'EGP': 18.5802, 'ERN': 15.0, 'ETB': 51.3614, 'EUR': 0.9259, 'FJD': 2.1163, 'FKP': 0.7787, 'FOK': 6.9072,
'GBP': 0.7788, 'GEL': 3.0339, 'GGP': 0.7787, 'GHS': 7.7553, 'GIP': 0.7787, 'GMD': 54.0333,
'GNF': 8896.9671, 'GTQ': 7.6475, 'GYD': 209.0387, 'HKD': 7.8479, 'HNL': 24.5693, 'HRK': 6.9759,
'HTG': 107.894, 'HUF': 343.295, 'IDR': 14341.489, 'ILS': 3.2735, 'IMP': 0.7787, 'INR': 76.4723,
'IQD': 1458.072, 'IRR': 42051.3384, 'ISK': 128.7315, 'JEP': 0.7787, 'JMD': 154.654, 'JOD': 0.709,
'JPY': 128.7001, 'KES': 115.7729, 'KGS': 82.9306, 'KHR': 4041.021, 'KID': 1.3734, 'KMF': 455.4913,
'KRW': 1241.5203, 'KWD': 0.2996, 'KYD': 0.8333, 'KZT': 443.6302, 'LAK': 13106.4208, 'LBP': 1507.5,
'LKR': 330.8464, 'LRD': 152.0024, 'LSL': 15.5633, 'LYD': 4.712, 'MAD': 9.6981, 'MDL': 18.4927,
'MGA': 3991.8343, 'MKD': 56.6224, 'MMK': 1835.3117, 'MNT': 3052.3832, 'MOP': 8.0833, 'MRU': 36.4208,
'MUR': 42.6761, 'MVR': 15.4107, 'MWK': 819.5117, 'MXN': 20.2706, 'MYR': 4.3037, 'MZN': 64.6108,
'NAD': 15.5633, 'NGN': 414.9575, 'NIO': 35.8503, 'NOK': 8.9409, 'NPR': 122.3934, 'NZD': 1.5043,
'OMR': 0.3845, 'PAB': 1.0, 'PEN': 3.7455, 'PGK': 3.5245, 'PHP': 52.3739, 'PKR': 186.6637, 'PLN': 4.2895,
'PYG': 6827.8499, 'QAR': 3.64, 'RON': 4.5623, 'RSD': 108.8545, 'RUB': 77.0753, 'RWF': 1051.2487,
'SAR': 3.75, 'SBD': 7.9427, 'SCR': 14.4082, 'SDG': 445.0241, 'SEK': 9.5371, 'SGD': 1.3669, 'SHP': 0.7787,
'SLL': 12368.3272, 'SOS': 577.9904, 'SRD': 20.7337, 'SSP': 425.1448, 'STN': 22.6835, 'SYP': 2517.89,
'SZL': 15.5633, 'THB': 34.0252, 'TJS': 12.4745, 'TMT': 3.4991, 'TND': 2.819, 'TOP': 2.2329,
'TRY': 14.7711, 'TTD': 6.7809, 'TVD': 1.3734, 'TWD': 29.2194, 'TZS': 2316.5256, 'UAH': 29.523,
'UGX': 3522.2721, 'UYU': 40.3923, 'UZS': 11347.4483, 'VES': 4.4354, 'VND': 22974.0933, 'VUV': 111.8606,
'WST': 2.5658, 'XAF': 607.3217, 'XCD': 2.7, 'XDR': 0.7358, 'XOF': 607.3217, 'XPF': 110.4843,
'YER': 250.3169, 'ZAR': 15.5636, 'ZMW': 17.0195, 'ZWL': 153.7166}
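# Illustrative helper (not part of the original script): the assumed convention is that
# TO_USD maps one USD to the listed amount of each currency, so converting a local
# amount back to USD divides by that rate.
def convert_to_usd(amount, currency):
    """Sketch: convert `amount` quoted in `currency` into USD via the TO_USD table."""
    return amount / TO_USD[currency]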
WEEKLY_LABELS_FILES = {1: 'test_set_week_1_labels.csv', 2: 'test_set_labels_week_2.csv',
3: 'test_set_week_3_labels.csv',
4: 'test_set_week_4_labels.csv'}
def undersample(df: pd.DataFrame, label_col_name: str) -> pd.DataFrame:
    # keep every positive (label 1) row and randomly downsample the negatives to a 10:1 ratio
label_1 = df[df[label_col_name] == 1]
label_0 = df[df[label_col_name] == 0]
label_0 = label_0.sample(n=len(label_1) * 10, random_state=1)
df = pd.concat([label_1, label_0], axis=0)
return df
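# Usage sketch (the column name is hypothetical): rebalance the training frame so that
# roughly ten negatives are kept per positive before fitting.
# balanced_df = undersample(train_df, 'cancellation_datetime')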
def days_to_P(policy, days):
d_idx = policy.find('D')
fine_days = int(policy[d_idx + 1: -1])
if policy[-1] == 'N':
P = str(int(round((fine_days / days) * 100, 0)))
else:
P = str(int(policy[d_idx + 1: -1]))
fine_days = policy[:d_idx]
return fine_days, P
def parse_policy2(policy, stay_days):
if policy == 'UNKNOWN':
return 100100
if stay_days < 0:
return 100100
policies = policy.split("_")
if len(policies) == 1:
days, P = days_to_P(policies[0], stay_days)
return int(days + P)
if len(policies) == 2:
if 'D' not in policies[1]:
# no second step
day1, P1 = days_to_P(policies[0], stay_days)
day2, P2 = days_to_P(policies[1], stay_days)
return int(day1 + P1 + P2)
else:
day1, P1 = days_to_P(policies[0], stay_days)
day2, P2 = days_to_P(policies[1], stay_days)
return int(day1 + P1 + day2 + P2)
else:
day1, P1 = days_to_P(policies[0], stay_days)
day2, P2 = days_to_P(policies[1], stay_days)
day3, P3 = days_to_P(policies[2], stay_days)
return int(day1 + P1 + day2 + P2 + P3)
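# Worked example of the encoding above (hand-traced, for illustration only): with a
# 5-night stay, '1D1N_100P' splits into '1D1N' and '100P'; days_to_P('1D1N', 5)
# returns ('1', '20') (a 1-night fine is ~20% of the stay) and days_to_P('100P', 5)
# returns ('100', '100'), so parse_policy2('1D1N_100P', 5) == 120100.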
def load_data(train_filename: str, test_filename, week_sets=iter([])):
"""
Load Agoda booking cancellation dataset
Parameters
----------
    train_filename: str
        Path to the Agoda booking cancellation training dataset
    test_filename: str
        Path to the test dataset
    week_sets: iterable
        Indices of weekly test sets to include as additional training data
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
df = pd.read_csv(train_filename).drop_duplicates()
df['cancellation_datetime'] = df['cancellation_datetime'].fillna(0)
df['cancellation_datetime'] = pd.to_datetime(df['cancellation_datetime'])
df['booking_datetime'] = pd.to_datetime(df['booking_datetime'])
df['checkin_date'] = pd.to_datetime(df['checkin_date'])
df['checkout_date'] = pd.to_datetime(df['checkout_date'])
df['new_datetime'] = (df['cancellation_datetime'] - df['booking_datetime']).dt.days
    df.loc[df['new_datetime'] > 45, 'new_datetime'] = 0
    df.loc[df['new_datetime'] < 7, 'new_datetime'] = 0
    df['another_datetime'] = (df['checkin_date'] - df['cancellation_datetime']).dt.days
    df.loc[df['another_datetime'] < 2, 'new_datetime'] = 0
    df.loc[df['new_datetime'] != 0, 'new_datetime'] = 1
df['cancellation_datetime'] = df['new_datetime']
df.drop(['new_datetime', 'another_datetime'], axis=1, inplace=True)
# add weekly data with labels
for k in week_sets:
weekly_set = pd.read_csv(f'test_weeks_data/test_set_week_{k}.csv')
weekly_labels = pd.read_csv(f'test_weeks_labels/{WEEKLY_LABELS_FILES[k]}')
weekly_data = pd.concat([weekly_set, weekly_labels], axis=1)
weekly_data['cancellation_datetime'] = [int(x.strip()[-1]) for x in weekly_data['h_booking_id|label']]
weekly_data = weekly_data.drop('h_booking_id|label', axis=1)
df = pd.concat([df, weekly_data], ignore_index=True)
# add test data to be processed together with train data
test_data = pd.read_csv(test_filename)
test_size = test_data.shape[0]
full_data = pd.concat([df, test_data])
df = pd.DataFrame(full_data, columns=['booking_datetime', 'checkin_date', 'checkout_date',
'hotel_star_rating',
# 'accommadation_type_name',
'charge_option',
'customer_nationality',
'guest_is_not_the_customer',
# 'guest_nationality_country_name',
'no_of_adults', 'no_of_children',
# 'no_of_extra_bed'
'no_of_room',
'original_selling_amount',
'original_payment_method',
# 'original_payment_type',
'original_payment_currency',
'is_user_logged_in', 'is_first_booking',
'request_nonesmoke', 'request_latecheckin', 'request_highfloor',
'request_largebed', 'request_twinbeds', 'request_airport',
'request_earlycheckin',
'cancellation_policy_code', 'cancellation_datetime',
# 'hotel_city_code',
'hotel_chain_code',
'hotel_brand_code',
'hotel_area_code',
# 'hotel_country_code'
])
# df = df[df['original_selling_amount'] < 20000]
df = pd.get_dummies(data=df, columns=[
# 'accommadation_type_name',
'charge_option',
'customer_nationality',
# 'guest_nationality_country_name',
'original_payment_method',
# 'original_payment_type'
# 'original_payment_currency'
], drop_first=True)
df['booking_datetime'] = pd.to_datetime(df['booking_datetime'])
    df['checkin_date'] = pd.to_datetime(df['checkin_date'])
from read_data import read_data
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.patches as ptc
import matplotlib.dates as mdt
import datetime as dt
import numpy as np
import math
# This visualization shows a time series of the raw volume recorded in each time step,
# as well as color-coded rectangles showing the time and duration of each event
raw, evn, mf, tdc = read_data()
# gets unique list of each date in the dataset
dates = evn['start'].map(lambda t: t.date()).unique()
# all 15 days are too many for one plot, so show 'daysper' days per figure
daysper = 3
n=1
# Option to toggle collapsing colored events to one line or keep separate
collapse = 1 #0 to show on different y values, 1 to show on same y
for d in range(len(dates)):
#initialize dataframe to keep track of totals in each type for showing in legend
    tots = pd.DataFrame(index=tdc[0], columns=['vol'])
'''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from tqdm import trange
import pandas as pd
from PIL import Image
def make_train_anno(data_root_dir, anno_path):
# Set data directories.
train_dir = data_root_dir + 'bounding_box_train/'
# Get image names.
train_img_names = sorted([d for d in os.listdir(train_dir) if d.split('.')[-1].lower() in ('jpg', 'jpeg', 'png')])
    # Organize annotation data.
train_list = __org_data(train_dir, train_img_names, 'train')
df = pd.DataFrame(train_list)
df = __add_person_index(df)
# Save DataFrame.
__save_dataframe(df, anno_path, 'train')
def make_qng_anno(data_root_dir, anno_path):
# Set data directories.
gallery_dir = data_root_dir + 'bounding_box_test/'
query_dir = data_root_dir + 'query/'
# Get image names.
gallery_img_names = sorted([d for d in os.listdir(gallery_dir) if d.split('.')[-1].lower() in ('jpg', 'jpeg', 'png')])
query_img_names = sorted([d for d in os.listdir(query_dir) if d.split('.')[-1].lower() in ('jpg', 'jpeg', 'png')])
    # Organize annotation data.
gallery_list = __org_data(gallery_dir, gallery_img_names, 'gallery')
query_list = __org_data(query_dir, query_img_names, 'query')
data_list = gallery_list + query_list
df = pd.DataFrame(data_list)
# Save DataFrame.
__save_dataframe(df, anno_path, 'test')
def __org_data(data_dir, img_names, mode):
lis = []
for i in trange(len(img_names), desc='Organizing {} data'.format(mode)):
dic = {}
dic['image_name'] = img_names[i]
dic['image_path'] = data_dir + img_names[i]
splited = img_names[i].split('_')
dic['person_id'] = splited[0]
dic['camera_id'] = splited[1][:2]
dic['sequence_id'] = splited[1][2:]
dic['frame_no'] = splited[2]
dic['dpm_bbox_no'] = splited[3].split('.')[0] # DPM: Deformable Part Model, bbox: bounding box
dic['mode'] = mode
lis.append(dic)
return lis
def __add_person_index(df):
    # Make person ID and index dictionary.
train_person_ids = sorted(set(df.loc[df['mode']=='train', 'person_id'].values))
train_person_dic = {train_person_ids[i]: i for i in range(len(train_person_ids))}
# Set person indexes.
df['person_index'] = -999
for p_id, p_idx in train_person_dic.items():
cond = df['person_id']==p_id
df.loc[cond, 'person_index'] = p_idx
return df
def __save_dataframe(df, anno_path, mode):
cols = ['person_id', 'camera_id', 'sequence_id', 'frame_no', 'dpm_bbox_no', 'mode', 'image_name', 'image_path']
if mode == 'train':
cols = ['person_index'] + cols
df = df[cols]
df.to_csv(anno_path, index=False)
print('Saved "{}" annotation data of Market1501 to {}'.format(mode, anno_path))
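# Usage sketch (paths are placeholders, not taken from the original project):
# make_train_anno('data/Market-1501-v15.09.15/', 'anno/market1501_train.csv')
# make_qng_anno('data/Market-1501-v15.09.15/', 'anno/market1501_test.csv')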
class Market1501Train(object):
def __init__(self, anno_path, mode, transforms=None):
df_all = pd.read_csv(anno_path)
self.df = df_all[df_all['mode']==mode].reset_index(drop=True)
self.transforms = transforms
def __getitem__(self, idx):
# Filter data
df = self.df.copy()
# Image
img_path = df.loc[idx, 'image_path']
assert os.path.exists(img_path)
img = Image.open(img_path).convert('RGB')
img = img.resize([img.size[0], img.size[0]], Image.NEAREST)
# Target
#person_id = df.loc[idx, 'person_id']
person_index = df.loc[idx, 'person_index']
# Transform
if self.transforms is not None:
img = self.transforms(img)
return img, person_index
def __len__(self):
return len(set(self.df['image_path'].values.tolist()))
class Market1501Test(object):
def __init__(self, anno_path, mode, transforms=None):
df_all = pd.read_csv(anno_path)
self.df = df_all[df_all['mode']==mode].reset_index(drop=True)
self.transforms = transforms
def __getitem__(self, idx):
# Filter data
df = self.df.copy()
# Image
img_path = df.loc[idx, 'image_path']
assert os.path.exists(img_path)
img = Image.open(img_path).convert('RGB')
img = img.resize([img.size[0], img.size[0]], Image.NEAREST)
# Target
person_id = df.loc[idx, 'person_id']
#person_index = df.loc[idx, 'person_index']
# Transform
if self.transforms is not None:
img = self.transforms(img)
return img, person_id, img_path
def __len__(self):
return len(set(self.df['image_path'].values.tolist()))
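# Direct-use sketch (annotation path and transform are assumed; load_train_data below
# wires the dataset up with the transforms actually used for training):
# ds = Market1501Train('anno/market1501_train.csv', 'train', transforms=transforms.ToTensor())
# img, person_index = ds[0]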
def load_train_data(anno_path, n_batch=32):
# cf. https://github.com/GNAYUOHZ/ReID-MGN/blob/master/data.py
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
train_set = Market1501Train(anno_path, 'train', transforms=transform_train)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=n_batch, shuffle=True, num_workers=0)
    df = pd.read_csv(anno_path)
import unittest
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
from openiec.property.coherentenergy_OC import CoherentGibbsEnergy_OC
from openiec.calculate.calcsigma_OC import SigmaCoherent_OC
from pyOC import opencalphad as oc
from pyOC import GridMinimizerStatus as gmStat
def run_NUCLEA():
print('### test U-O coherent interface in the liquid miscibility gap ###\n')
# tdb filepath
tdbFile=os.environ['TDBDATA_PRIVATE']+'/feouzr.tdb'
# tdbFile=os.environ['TDBDATA_PRIVATE']+'/NUCLEA-17_1_mod.TDB'
# tdbFile='tests/TAF_uzrofe_V10.TDB'
# components
comps = ['O', 'U']
# mass density laws (from Barrachin2004)
constituentDensityLaws = {
'U1' : lambda T: 17270.0-1.358*(T-1408),
'ZR1' : lambda T: 6844.51-0.609898*T+2.05008E-4*T**2-4.47829E-8*T**3+3.26469E-12*T**4,
'O2U1' : lambda T: 8860.0-9.285E-1*(T-3120),
'O2ZR1': lambda T: 5150-0.445*(T-2983),
        'O1'   : lambda T: 1.141   # placeholder value; acceptable since no 'free' oxygen appears in the considered mixtures
}
# phase names
phasenames = ['LIQUID#1', 'LIQUID#2']
# pressure & temp
P = 1E5
T = 3200
# Given initial alloy composition. x0 is the mole fraction of U.
x0min = [0.5]
x0max = [0.7]
x0range = np.linspace(x0min[0],x0max[0],num=20, endpoint=True)
# Composition step for searching initial interfacial equilibrium composition.
dx = 0.05
    results = pd.DataFrame(columns=['X_U', 'n_phase1', 'n_phase2', 'mu_U', 'mu_O'])
import pandas as pd
from scripts.python.routines.manifest import get_manifest
from tqdm import tqdm
from scripts.python.EWAS.routines.correction import correct_pvalues
import plotly.graph_objects as go
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.scatter import add_scatter_trace
from scripts.python.routines.plot.layout import add_layout
import os
import numpy as np
from pingouin import ancova
from scripts.python.routines.filter.pheno import filter_pheno
platform = "GPL13534"
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets = ["GSE53740"]
is_rerun = True
num_cpgs_to_plot = 10
for dataset in datasets:
print(dataset)
status_col = get_column_name(dataset, 'Status').replace(' ', '_')
age_col = get_column_name(dataset, 'Age').replace(' ', '_')
sex_col = get_column_name(dataset, 'Sex').replace(' ', '_')
status_dict = get_status_dict(dataset)
status_vals = sorted(list(status_dict.values()))
status_names_dict = get_status_names_dict(dataset)
sex_dict = get_sex_dict(dataset)
terms = [status_col, age_col]
aim = f"Age_Status"
path_save = f"{path}/{platform}/{dataset}/EWAS/ancova/{aim}"
if not os.path.exists(f"{path_save}/figs"):
os.makedirs(f"{path_save}/figs")
continuous_vars = {'Age': age_col}
categorical_vars = {status_col: status_dict, sex_col: sex_dict}
    pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage
)
from evalml.pipelines.components import Imputer
@pytest.fixture
def imputer_test_data():
return pd.DataFrame({
"categorical col": pd.Series(["zero", "one", "two", "zero", "three"], dtype='category'),
"int col": [0, 1, 2, 0, 3],
"object col": ["b", "b", "a", "c", "d"],
"float col": [0.0, 1.0, 0.0, -2.0, 5.],
"bool col": [True, False, False, True, True],
"categorical with nan": pd.Series([np.nan, "1", np.nan, "0", "3"], dtype='category'),
"int with nan": [np.nan, 1, 0, 0, 1],
"float with nan": [0.0, 1.0, np.nan, -1.0, 0.],
"object with nan": ["b", "b", np.nan, "c", np.nan],
"bool col with nan": pd.Series([True, np.nan, False, np.nan, True], dtype='boolean'),
"all nan": [np.nan, np.nan, np.nan, np.nan, np.nan],
"all nan cat": pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], dtype='category')
})
def test_invalid_strategy_parameters():
with pytest.raises(ValueError, match="Valid impute strategies are"):
Imputer(numeric_impute_strategy="not a valid strategy")
with pytest.raises(ValueError, match="Valid categorical impute strategies are"):
Imputer(categorical_impute_strategy="mean")
def test_imputer_default_parameters():
imputer = Imputer()
expected_parameters = {
'categorical_impute_strategy': 'most_frequent',
'numeric_impute_strategy': 'mean',
'categorical_fill_value': None,
'numeric_fill_value': None
}
assert imputer.parameters == expected_parameters
@pytest.mark.parametrize("categorical_impute_strategy", ["most_frequent", "constant"])
@pytest.mark.parametrize("numeric_impute_strategy", ["mean", "median", "most_frequent", "constant"])
def test_imputer_init(categorical_impute_strategy, numeric_impute_strategy):
imputer = Imputer(categorical_impute_strategy=categorical_impute_strategy,
numeric_impute_strategy=numeric_impute_strategy,
categorical_fill_value="str_fill_value",
numeric_fill_value=-1)
expected_parameters = {
'categorical_impute_strategy': categorical_impute_strategy,
'numeric_impute_strategy': numeric_impute_strategy,
'categorical_fill_value': 'str_fill_value',
'numeric_fill_value': -1
}
expected_hyperparameters = {
"categorical_impute_strategy": ["most_frequent"],
"numeric_impute_strategy": ["mean", "median", "most_frequent"]
}
assert imputer.name == "Imputer"
assert imputer.parameters == expected_parameters
assert imputer.hyperparameter_ranges == expected_hyperparameters
def test_numeric_only_input(imputer_test_data):
X = imputer_test_data[["int col", "float col",
"int with nan", "float with nan", "all nan"]]
y = pd.Series([0, 0, 1, 0, 1])
imputer = Imputer(numeric_impute_strategy="median")
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame({
"int col": [0, 1, 2, 0, 3],
"float col": [0.0, 1.0, 0.0, -2.0, 5.],
"int with nan": [0.5, 1.0, 0.0, 0.0, 1.0],
"float with nan": [0.0, 1.0, 0, -1.0, 0.]
})
assert_frame_equal(transformed.to_dataframe(), expected, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed.to_dataframe(), expected, check_dtype=False)
def test_categorical_only_input(imputer_test_data):
X = imputer_test_data[["categorical col", "object col", "bool col",
"categorical with nan", "object with nan",
"bool col with nan", "all nan cat"]]
y = pd.Series([0, 0, 1, 0, 1])
imputer = Imputer()
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame({
"categorical col": pd.Series(["zero", "one", "two", "zero", "three"], dtype='category'),
"object col": pd.Series(["b", "b", "a", "c", "d"], dtype='category'),
"bool col": [True, False, False, True, True],
"categorical with nan": pd.Series(["0", "1", "0", "0", "3"], dtype='category'),
"object with nan": pd.Series(["b", "b", "b", "c", "b"], dtype='category'),
"bool col with nan": [True, True, False, True, True]
})
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed.to_dataframe(), expected, check_dtype=False)
def test_categorical_and_numeric_input(imputer_test_data):
X = imputer_test_data
y = pd.Series([0, 0, 1, 0, 1])
imputer = Imputer()
imputer.fit(X, y)
transformed = imputer.transform(X, y)
expected = pd.DataFrame({
"categorical col": pd.Series(["zero", "one", "two", "zero", "three"], dtype='category'),
"int col": [0, 1, 2, 0, 3],
"object col": pd.Series(["b", "b", "a", "c", "d"], dtype='category'),
"float col": [0.0, 1.0, 0.0, -2.0, 5.],
"bool col": [True, False, False, True, True],
"categorical with nan": pd.Series(["0", "1", "0", "0", "3"], dtype='category'),
"int with nan": [0.5, 1.0, 0.0, 0.0, 1.0],
"float with nan": [0.0, 1.0, 0, -1.0, 0.],
"object with nan": pd.Series(["b", "b", "b", "c", "b"], dtype='category'),
"bool col with nan": [True, True, False, True, True]
})
assert_frame_equal(transformed.to_dataframe(), expected, check_dtype=False)
imputer = Imputer()
transformed = imputer.fit_transform(X, y)
assert_frame_equal(transformed.to_dataframe(), expected, check_dtype=False)
def test_drop_all_columns(imputer_test_data):
X = imputer_test_data[["all nan cat", "all nan"]]
    y = pd.Series([0, 0, 1, 0, 1])
#!/usr/bin/env python
# Copyright 2021 Owkin, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import configargparse
import torch
import pandas as pd
import numpy as np
from src.models.logistic_regression_model import LogisticRegression
from src.utils.format_data import create_test_dataset_without_split
from src.utils.genes_selection import genes_selection_extraction
from src.utils.pytorch_evaluation import predict
def main(args):
sizemodel = int(args.model_path.split("sizemodel")[1].split(".")[0])
model = LogisticRegression(sizemodel).cpu()
model.load_state_dict(torch.load(args.model_path))
X_test = create_test_dataset_without_split(args.test_file)
if sizemodel == 69:
X_test = genes_selection_extraction(X_test, "rotterdam")
else:
X_test = genes_selection_extraction(X_test, "citbcmst")
    prediction_results = pd.DataFrame()
import os, sys
import shutil
from pathlib import Path
import pandas as pd
import urllib
import configparser
try:
from bing import Bing
except ImportError: # Python 3
from .bing import Bing
def download(query, limit=100, output_dir='dataset', adult_filter_off=False,
force_replace=False, timeout=60, verbose=True, filters=''):
# engine = 'bing'
if adult_filter_off:
adult = 'off'
else:
adult = 'on'
image_dir = Path(output_dir).joinpath(query).absolute()
if force_replace:
        if image_dir.is_dir():
shutil.rmtree(image_dir)
# check directory and create if necessary
try:
        if not image_dir.is_dir():
            image_dir.mkdir(parents=True)
except Exception as e:
print('[Error]Failed to create directory.', e)
sys.exit(1)
print("[%] Downloading Images to {}".format(str(image_dir.absolute())))
bing = Bing(query, limit, image_dir, adult, timeout, filters, verbose)
bing.run()
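# Example call (illustrative query and limit; requires network access):
# download('golden retriever', limit=20, output_dir='dataset', adult_filter_off=True)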
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def main():
config = configparser.RawConfigParser()
config.read('filters.cfg')
details_dict = dict(config.items('settings'))
# print(f"details_dict: ", details_dict)
task_filename = "task_list.xlsx"
    df = pd.read_excel(task_filename)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
#################################################
# Test context free predict_expectation() method
################################################
def test_exp_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_context_history_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3], 'column2': [2, 3, 1]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
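# A 1-D pandas Series passed as context history is expected to be treated as a
# single feature, so the fitted linear model should hold exactly one coefficient.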
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
for cp in BaseTest.cps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
def test_context_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3, 3, 2, 1], 'column2': [2, 3, 1, 1, 2, 3]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
for cp in BaseTest.cps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
#################################################
# Test contextual predict() method
################################################
def test_context_arm_list_int(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_arm_list_str(self):
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, -2, 2, 3, 11], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, -5, 2, 3, 10], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, -2, 4, 3, 9], [20, 19, 18, 17, 16], [1, 2, 1, 1, 3],
[17, 18, 17, 19, 18]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_decision_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_reward_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards= | pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]) | pandas.Series |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import classification_report
# Read a csv using the pandas read_csv function
data = pd.read_csv('bank.csv',sep=',',header='infer')
# Remove columns named day, poutcome, contact
data = data.drop(['day','poutcome','contact'],axis=1)
def normalize(data):
# Before the data can be fed to the classifier, the categorical/text columns
# must be mapped to numeric codes so they can be used for cross-validation
# and prediction later on
data.y.replace(('yes', 'no'), (1, 0), inplace=True)
data.default.replace(('yes','no'),(1,0),inplace=True)
data.housing.replace(('yes','no'),(1,0),inplace=True)
data.loan.replace(('yes','no'),(1,0),inplace=True)
data.marital.replace(('married','single','divorced'),(1,2,3),inplace=True)
data.month.replace(('jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec'),(1,2,3,4,5,6,7,8,9,10,11,12),inplace=True)
data.education.replace(('primary','secondary','tertiary','unknown'),(1,2,3,4),inplace=True)
data.job.replace(('technician','services','retired','blue-collar','entrepreneur','admin.',
'housemaid','student','self-employed','management',
'unemployed','unknown'),(1,2,3,4,5,6,7,8,9,10,11,12),inplace=True )
return data
def experiment_generator(train_feats, train_class):
# Initialize the plotting sets for later use
accuracy, penalties = [], []
# Set your G & C parameters
G = .00000001 # Should be really small to allow better curvature values
penalty = 1 # Use large value to minimize the prediction error
N = 10 # Number of experiments
for item in range(N):
# Create a new classifier using the G & C parameters
clf = SVC(kernel='rbf', random_state = 0, gamma = G, C = penalty, probability=True)
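# gamma sets the RBF kernel width (smaller values give smoother decision
# boundaries); C is the penalty on misclassified training points (larger
# values mean less regularization).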
# Train the rbf classifier using training features and training class
clf.fit(train_feats, train_class.values.ravel())
# Make prediction using training features
pred_train = clf.predict(train_feats)
# Accuracy score
s_train = accuracy_score(train_class, pred_train)
# Store values for plotting
penalties.append(penalty)
accuracy.append(s_train)
# Increase experiment parameters
penalty += 1
G += .00000001
# Initialize plot for accuracy and penalty (C)
plt.scatter(penalties, accuracy)
plt.ylabel('Accuracy (%)')
plt.xlabel('Penalty - C Parameter')
plt.show()
data = normalize(data)
plt.hist((data.duration),bins=100)
plt.ylabel('Occurences (Frequency)')
plt.xlabel('Client Call Duration')
plt.show()
plt.hist((data.job),bins=10)
plt.ylabel('Occurences (Frequency)')
plt.xlabel('Client Job Indices')
plt.show()
plt.hist((data.balance),bins=10)
plt.ylabel('Occurences (Frequency)')
plt.xlabel('Client Balance')
plt.show()
# Create training and testing vars
X_train, X_test, y_train, y_test = train_test_split(data, data.y, test_size=0.2)
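# Note: X_train/X_test still contain the target column 'y' at this point; it is
# split out into separate class/feature frames further below before fitting.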
# Debugging
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
# Define your initial features
df_train = X_train
df_test = X_test
# Initialize a dataframe using the target training y column
df_train_class = pd.DataFrame(df_train['y'])
# Set the features to the rest of the columns (columns that are not 'y')
df_train_features = df_train.loc[:, df_train.columns != 'y']
# Initialize a dataframe using the target test y column
df_test_class = | pd.DataFrame(df_test['y']) | pandas.DataFrame |
# coding: utf-8
# In[ ]:
from __future__ import division
import os as os
from IPython.display import HTML
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from numpy import random as random
from matplotlib.colors import ListedColormap
plt.rcParams['figure.figsize'] = (12, 12)
plt.rcParams['font.size'] = 11
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['axes.labelsize'] = plt.rcParams['font.size']
plt.rcParams['axes.titlesize'] = 1.5*plt.rcParams['font.size']
plt.rcParams['legend.fontsize'] = plt.rcParams['font.size']
plt.rcParams['xtick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['ytick.labelsize'] = plt.rcParams['font.size']
#plt.rcParams['savefig.dpi'] = 3*plt.rcParams['savefig.dpi']
plt.rcParams['xtick.major.size'] = 3
plt.rcParams['xtick.minor.size'] = 3
plt.rcParams['xtick.major.width'] = 1
plt.rcParams['xtick.minor.width'] = 1
plt.rcParams['ytick.major.size'] = 3
plt.rcParams['ytick.minor.size'] = 3
plt.rcParams['ytick.major.width'] = 1
plt.rcParams['ytick.minor.width'] = 1
plt.rcParams['legend.frameon'] = False
plt.rcParams['legend.loc'] = 'center left'
plt.rcParams['axes.linewidth'] = 1
plt.gca().spines['right'].set_color('none')
plt.gca().spines['top'].set_color('none')
plt.gca().xaxis.set_ticks_position('bottom')
plt.gca().yaxis.set_ticks_position('left')
sns.set_style('whitegrid')
plt.close()
import re
from statsmodels.tsa.stattools import adfuller, acf, pacf
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.metrics import mean_squared_error
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
from statsmodels.tsa.seasonal import seasonal_decompose
from itertools import count #,izip
import matplotlib.pyplot as plt
from numpy import linspace, loadtxt, ones, convolve
import numpy as np
import pandas as pd
import collections
import plotly.offline as offline
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot,iplot
#import plotly.plotly as py
import chart_studio.plotly as py
#import plotly.io as pio
py.sign_in('PranavPandit', '<KEY>')
#import cufflinks as cf
#####################################################################################################################
#####################################################################################################################
def read_data(filename):
data = pd.read_csv(filename, encoding= 'unicode_escape')
#data = pd.merge(data, sc_names, left_on='Species', right_on='Training_data_name', how='left')
#data = pd.read_csv(filename, low_memory=False, skiprows= [0])
#data.columns = ['ID', 'Case', 'Admitted_at', 'Species', 'Organization','Reasons', 'Address_found', 'Latitude', 'Longitude',
# 'Care_rescuer', 'Disposition', 'Dispositioned_at', 'Diagnosis', 'Sex', 'Weight', 'Age', 'Condition']
data['Admitted_at'] = pd.to_datetime(data['Admitted_at']).dt.strftime('%Y-%m-%d')
data.set_index(pd.DatetimeIndex(data['Admitted_at']), inplace= True, drop= False)
return data
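# The frame is indexed by admission date (DatetimeIndex), which is what allows
# the weekly resample('W') aggregation in test_stationarity() below.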
#####################################################################################################################
#####################################################################################################################
def test_stationarity(SpeciesName, syndrome, data):
vo = data[(data.ScientificName == SpeciesName) & (data.prediction_adjusted == syndrome)]
weekly_vo = vo.resample('W')['WRMD_ID'].count()
#Determing rolling statistics
rolmean = weekly_vo.rolling(window=4,center=False).mean()
rolstd = weekly_vo.rolling(window=4,center=False).std()
sp_data = pd.concat([weekly_vo, rolmean, rolstd], axis=1)
sp_data.columns = ['original', 'rolling mean', 'std deviation']
layout = go.Layout(
title= str(SpeciesName)+': admissions, rolling mean & std deviation',
yaxis=dict(title='# admissions'),
xaxis=dict(title='time'))
#Plot rolling statistics:
sp_data.iplot(kind='scatter',layout=layout)
#Perform Dickey-Fuller test:
print ('Results of Dickey-Fuller Test:')
dftest = adfuller(weekly_vo, autolag='AIC')
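# adfuller() returns (test statistic, p-value, lags used, observations used,
# critical values, icbest); the slice [0:4] below keeps only the first four.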
dfoutput = | pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used']) | pandas.Series |
import sys
from datetime import datetime
import statistics
import pandas
from scipy.stats import linregress
import src.point as point
def main():
if len(sys.argv) not in (3, 4):
exit("Invalid number of arguments. Input and output .csv files' names required, may be followed by cities csv")
input_file = sys.argv[1]
output_file = sys.argv[2]
data_frame = pandas.read_csv(input_file, usecols=["dt", "AverageTemperature", "City", "Latitude", "Longitude"])
data_by_location = data_frame.groupby(by=["City", "Latitude", "Longitude"], as_index=False)
data_by_location = data_by_location.apply(temperature_series_to_regression)
data_by_location = data_by_location.reset_index()
if len(sys.argv) == 4:
cities_file = sys.argv[3]
cities_data_frame = pandas.read_csv(cities_file, usecols=["AccentCity", "Latitude", "Longitude"],
encoding="ISO-8859-1")
cities_data_frame = cities_data_frame\
.assign(acccity=lambda df: df['AccentCity'].str.lower())\
.set_index('acccity')
data_by_location = data_by_location.apply(fix_cities_location, axis=1, cities_data_frame=cities_data_frame)
data_by_location = data_by_location.drop_duplicates(subset=["Latitude", "Longitude"])
data_by_location.to_csv(output_file, header=True)
points = point.load_from_csv(output_file)
locations = [(p.longitude, p.latitude) for p in points]
unique_locations = set(locations)
print("Points: {}, Unique points: {}".format(len(locations), len(unique_locations)))
median, mean = count_stats(data_by_location)
print("Average regression: {}, median: {}".format(mean, median))
def temperature_series_to_regression(temperatures: pandas.Series):
temperatures = temperatures.dropna()
regression = linregress(temperatures["dt"].map(map_date), temperatures["AverageTemperature"])[0]
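# linregress() returns (slope, intercept, rvalue, pvalue, stderr); index [0]
# keeps only the slope, i.e. the temperature trend per mapped time unit.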
print("{}: {}".format(list(temperatures["City"])[0], regression))
return | pandas.Series(regression, index=["Regression"]) | pandas.Series |
import numpy as np
import re
from enum import Enum
import pandas as pd
ORIGINAL_DATA_DIR = "./original_transactions/"
CLEAN_DATA_DIR = "./clean_transactions/"
BOA_COLS = ["Posted Date", "Payee", "Amount"]
CHASE_COLS = ["Transaction Date", "Description", "Amount"]
CITI_COLS = ["Date", "Description", "Amount"]
GENERIC_COLS = ["Date", "Description", "Amount"]
# Converts raw data (downloaded from the various bank sites) to clean data.
# Clean data schema: transaction_date, description, amount.
# Raw data is used 1) as training data for labeling and 2) for testing.
class Bank(Enum):
AMEX = 1
BOA = 2
CHASE = 3
CITI = 4
DISCOVER = 5
class DataProcessor:
# input file: /path/to/files/AMEX_3008.csv
# eg. ./original_transactions/AMEX_
def __init__(self, input_file):
self.raw_transaction_file = input_file
self.bank, self.input_file_name = self._get_bank_name(input_file)
self.output_file = None
def generate_clean_data(self):
output_file_name = self._get_output_file_name(self.input_file_name)
self.output_file = CLEAN_DATA_DIR + output_file_name
if self.bank == Bank.AMEX.name:
self._parse_AMEX()
elif self.bank == Bank.BOA.name:
self._parse_BOA()
elif self.bank == Bank.CHASE.name:
self._parse_CHASE()
elif self.bank == Bank.CITI.name:
self._parse_CITI()
pass
elif self.bank == Bank.DISCOVER.name:
# TODO
pass
else:
print("Unsupported Bank type -- " + self.bank_name)
def _parse_AMEX(self):
df = pd.read_csv(self.raw_transaction_file)
# In Amex raw data, 1st, 3rd, 4th col are
# date, amount, description
df = df.iloc[:, [0, 3, 2]]
df.columns = GENERIC_COLS
df.to_csv(self.output_file, index=False)
def _parse_BOA(self):
df = pd.read_csv(self.raw_transaction_file)
df = df[BOA_COLS]
df.columns = GENERIC_COLS
df.to_csv(self.output_file, index=False)
def _parse_CHASE(self):
df = pd.read_csv(self.raw_transaction_file)
df = df[CHASE_COLS]
df.columns = GENERIC_COLS
df.to_csv(self.output_file, index=False)
def _parse_CITI(self):
"""
In the Citi csv file, Debit and Credit are two separate columns: Debit is a
positive value and Credit is a negative value. A new "Amount" column is
created with the original sign reversed.
"""
df = pd.read_csv(self.raw_transaction_file)
# print(df)
df["Amount"] = df.apply(lambda row: row.Debit * (-1) if not | pd.isnull(row.Debit) | pandas.isnull |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat
from pandas.core.base import DataError
from pandas.util import testing as tm
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame(
{
"value": np.random.randn(500),
"key1": lev1.take(lab1),
"key2": lev2.take(lab2),
}
)
result = df.groupby(["key1", "key2"]).value.rank()
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
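# The grouped .rank() result must match ranking each group independently and
# stitching the pieces back together in the original row order.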
tm.assert_series_equal(result, expected)
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
expected = [
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, 8, 2, 6],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
],
],
)
@pytest.mark.parametrize(
"ties_method,ascending,pct,exp",
[
("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
],
)
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,exp",
[
("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
],
)
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
# GH 20561
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
np.nan,
| pd.Timestamp("2018-01-08") | pandas.Timestamp |
import sys
import os
import math
import datetime
import itertools
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests
import scipy.stats as stats
from mesa.batchrunner import BatchRunner, BatchRunnerMP
from mesa.datacollection import DataCollector
from project_material.model.network import HostNetwork
class CustomBatchRunner(BatchRunner):
def run_model(self, model):
while model.schedule.steps < self.max_steps:
model.step()
def track_params(model):
return (
model.num_nodes,
model.avg_node_degree,
model.initial_outbreak_size,
model.prob_spread_virus_gamma_shape,
model.prob_spread_virus_gamma_scale,
model.prob_spread_virus_gamma_loc,
model.prob_spread_virus_gamma_magnitude_multiplier,
model.prob_recover_gamma_shape,
model.prob_recover_gamma_scale,
model.prob_recover_gamma_loc,
model.prob_recover_gamma_magnitude_multiplier,
model.prob_virus_kill_host_gamma_shape,
model.prob_virus_kill_host_gamma_scale,
model.prob_virus_kill_host_gamma_loc,
model.prob_virus_kill_host_gamma_magnitude_multiplier,
model.prob_infectious_no_to_mild_symptom_gamma_shape,
model.prob_infectious_no_to_mild_symptom_gamma_scale,
model.prob_infectious_no_to_mild_symptom_gamma_loc,
model.prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_no_to_severe_symptom_gamma_shape,
model.prob_infectious_no_to_severe_symptom_gamma_scale,
model.prob_infectious_no_to_severe_symptom_gamma_loc,
model.prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_infectious_no_to_critical_symptom_gamma_shape,
model.prob_infectious_no_to_critical_symptom_gamma_scale,
model.prob_infectious_no_to_critical_symptom_gamma_loc,
model.prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_no_symptom_gamma_shape,
model.prob_infectious_mild_to_no_symptom_gamma_scale,
model.prob_infectious_mild_to_no_symptom_gamma_loc,
model.prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_severe_symptom_gamma_shape,
model.prob_infectious_mild_to_severe_symptom_gamma_scale,
model.prob_infectious_mild_to_severe_symptom_gamma_loc,
model.prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_critical_symptom_gamma_shape,
model.prob_infectious_mild_to_critical_symptom_gamma_scale,
model.prob_infectious_mild_to_critical_symptom_gamma_loc,
model.prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_no_symptom_gamma_shape,
model.prob_infectious_severe_to_no_symptom_gamma_scale,
model.prob_infectious_severe_to_no_symptom_gamma_loc,
model.prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_mild_symptom_gamma_shape,
model.prob_infectious_severe_to_mild_symptom_gamma_scale,
model.prob_infectious_severe_to_mild_symptom_gamma_loc,
model.prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_critical_symptom_gamma_shape,
model.prob_infectious_severe_to_critical_symptom_gamma_scale,
model.prob_infectious_severe_to_critical_symptom_gamma_loc,
model.prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_no_symptom_gamma_shape,
model.prob_infectious_critical_to_no_symptom_gamma_scale,
model.prob_infectious_critical_to_no_symptom_gamma_loc,
model.prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_mild_symptom_gamma_shape,
model.prob_infectious_critical_to_mild_symptom_gamma_scale,
model.prob_infectious_critical_to_mild_symptom_gamma_loc,
model.prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_severe_symptom_gamma_shape,
model.prob_infectious_critical_to_severe_symptom_gamma_scale,
model.prob_infectious_critical_to_severe_symptom_gamma_loc,
model.prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_recovered_no_to_mild_complication,
model.prob_recovered_no_to_severe_complication,
model.prob_recovered_mild_to_no_complication,
model.prob_recovered_mild_to_severe_complication,
model.prob_recovered_severe_to_no_complication,
model.prob_recovered_severe_to_mild_complication,
model.prob_gain_immunity,
model.hospital_bed_capacity_as_percent_of_population,
model.hospital_bed_cost_per_day,
model.icu_bed_capacity_as_percent_of_population,
model.icu_bed_cost_per_day,
model.ventilator_capacity_as_percent_of_population,
model.ventilator_cost_per_day,
model.drugX_capacity_as_percent_of_population,
model.drugX_cost_per_day,
)
def track_run(model):
return model.uid
class BatchHostNetwork(HostNetwork):
# id generator to track run number in batch run data
id_gen = itertools.count(1)
def __init__(self, num_nodes, avg_node_degree, initial_outbreak_size,
prob_spread_virus_gamma_shape,
prob_spread_virus_gamma_scale,
prob_spread_virus_gamma_loc,
prob_spread_virus_gamma_magnitude_multiplier,
prob_recover_gamma_shape,
prob_recover_gamma_scale,
prob_recover_gamma_loc,
prob_recover_gamma_magnitude_multiplier,
prob_virus_kill_host_gamma_shape,
prob_virus_kill_host_gamma_scale,
prob_virus_kill_host_gamma_loc,
prob_virus_kill_host_gamma_magnitude_multiplier,
prob_infectious_no_to_mild_symptom_gamma_shape,
prob_infectious_no_to_mild_symptom_gamma_scale,
prob_infectious_no_to_mild_symptom_gamma_loc,
prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_severe_symptom_gamma_shape,
prob_infectious_no_to_severe_symptom_gamma_scale,
prob_infectious_no_to_severe_symptom_gamma_loc,
prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_critical_symptom_gamma_shape,
prob_infectious_no_to_critical_symptom_gamma_scale,
prob_infectious_no_to_critical_symptom_gamma_loc,
prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_no_symptom_gamma_shape,
prob_infectious_mild_to_no_symptom_gamma_scale,
prob_infectious_mild_to_no_symptom_gamma_loc,
prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_severe_symptom_gamma_shape,
prob_infectious_mild_to_severe_symptom_gamma_scale,
prob_infectious_mild_to_severe_symptom_gamma_loc,
prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_critical_symptom_gamma_shape,
prob_infectious_mild_to_critical_symptom_gamma_scale,
prob_infectious_mild_to_critical_symptom_gamma_loc,
prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_no_symptom_gamma_shape,
prob_infectious_severe_to_no_symptom_gamma_scale,
prob_infectious_severe_to_no_symptom_gamma_loc,
prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_mild_symptom_gamma_shape,
prob_infectious_severe_to_mild_symptom_gamma_scale,
prob_infectious_severe_to_mild_symptom_gamma_loc,
prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_critical_symptom_gamma_shape,
prob_infectious_severe_to_critical_symptom_gamma_scale,
prob_infectious_severe_to_critical_symptom_gamma_loc,
prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_no_symptom_gamma_shape,
prob_infectious_critical_to_no_symptom_gamma_scale,
prob_infectious_critical_to_no_symptom_gamma_loc,
prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_mild_symptom_gamma_shape,
prob_infectious_critical_to_mild_symptom_gamma_scale,
prob_infectious_critical_to_mild_symptom_gamma_loc,
prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_severe_symptom_gamma_shape,
prob_infectious_critical_to_severe_symptom_gamma_scale,
prob_infectious_critical_to_severe_symptom_gamma_loc,
prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
prob_recovered_no_to_mild_complication,
prob_recovered_no_to_severe_complication,
prob_recovered_mild_to_no_complication,
prob_recovered_mild_to_severe_complication,
prob_recovered_severe_to_no_complication,
prob_recovered_severe_to_mild_complication,
prob_gain_immunity,
hospital_bed_capacity_as_percent_of_population,
hospital_bed_cost_per_day,
icu_bed_capacity_as_percent_of_population,
icu_bed_cost_per_day,
ventilator_capacity_as_percent_of_population,
ventilator_cost_per_day,
drugX_capacity_as_percent_of_population,
drugX_cost_per_day,
):
super().__init__(
num_nodes, avg_node_degree, initial_outbreak_size,
prob_spread_virus_gamma_shape,
prob_spread_virus_gamma_scale,
prob_spread_virus_gamma_loc,
prob_spread_virus_gamma_magnitude_multiplier,
prob_recover_gamma_shape,
prob_recover_gamma_scale,
prob_recover_gamma_loc,
prob_recover_gamma_magnitude_multiplier,
prob_virus_kill_host_gamma_shape,
prob_virus_kill_host_gamma_scale,
prob_virus_kill_host_gamma_loc,
prob_virus_kill_host_gamma_magnitude_multiplier,
prob_infectious_no_to_mild_symptom_gamma_shape,
prob_infectious_no_to_mild_symptom_gamma_scale,
prob_infectious_no_to_mild_symptom_gamma_loc,
prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_severe_symptom_gamma_shape,
prob_infectious_no_to_severe_symptom_gamma_scale,
prob_infectious_no_to_severe_symptom_gamma_loc,
prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_critical_symptom_gamma_shape,
prob_infectious_no_to_critical_symptom_gamma_scale,
prob_infectious_no_to_critical_symptom_gamma_loc,
prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_no_symptom_gamma_shape,
prob_infectious_mild_to_no_symptom_gamma_scale,
prob_infectious_mild_to_no_symptom_gamma_loc,
prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_severe_symptom_gamma_shape,
prob_infectious_mild_to_severe_symptom_gamma_scale,
prob_infectious_mild_to_severe_symptom_gamma_loc,
prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_critical_symptom_gamma_shape,
prob_infectious_mild_to_critical_symptom_gamma_scale,
prob_infectious_mild_to_critical_symptom_gamma_loc,
prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_no_symptom_gamma_shape,
prob_infectious_severe_to_no_symptom_gamma_scale,
prob_infectious_severe_to_no_symptom_gamma_loc,
prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_mild_symptom_gamma_shape,
prob_infectious_severe_to_mild_symptom_gamma_scale,
prob_infectious_severe_to_mild_symptom_gamma_loc,
prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_critical_symptom_gamma_shape,
prob_infectious_severe_to_critical_symptom_gamma_scale,
prob_infectious_severe_to_critical_symptom_gamma_loc,
prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_no_symptom_gamma_shape,
prob_infectious_critical_to_no_symptom_gamma_scale,
prob_infectious_critical_to_no_symptom_gamma_loc,
prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_mild_symptom_gamma_shape,
prob_infectious_critical_to_mild_symptom_gamma_scale,
prob_infectious_critical_to_mild_symptom_gamma_loc,
prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_severe_symptom_gamma_shape,
prob_infectious_critical_to_severe_symptom_gamma_scale,
prob_infectious_critical_to_severe_symptom_gamma_loc,
prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
prob_recovered_no_to_mild_complication,
prob_recovered_no_to_severe_complication,
prob_recovered_mild_to_no_complication,
prob_recovered_mild_to_severe_complication,
prob_recovered_severe_to_no_complication,
prob_recovered_severe_to_mild_complication,
prob_gain_immunity,
hospital_bed_capacity_as_percent_of_population,
hospital_bed_cost_per_day,
icu_bed_capacity_as_percent_of_population,
icu_bed_cost_per_day,
ventilator_capacity_as_percent_of_population,
ventilator_cost_per_day,
drugX_capacity_as_percent_of_population,
drugX_cost_per_day,
)
self.model_reporters_dict.update({'Model params': track_params, 'Run': track_run})
self.datacollector = DataCollector(model_reporters=self.model_reporters_dict)
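# The subclass only extends the model reporters with the parameter tuple and a
# run id so every collected row can be traced back to its configuration.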
# parameter lists for each parameter to be tested in batch run
br_params = {
'num_nodes': [500],
'avg_node_degree': [10],
'initial_outbreak_size': [2],
'prob_spread_virus_gamma_shape': [1],
'prob_spread_virus_gamma_scale': [3],
'prob_spread_virus_gamma_loc': [0],
'prob_spread_virus_gamma_magnitude_multiplier': [0.25],
'prob_recover_gamma_shape': [7],
'prob_recover_gamma_scale': [3],
'prob_recover_gamma_loc': [0],
'prob_recover_gamma_magnitude_multiplier': [0.75],
'prob_virus_kill_host_gamma_shape': [5.2],
'prob_virus_kill_host_gamma_scale': [3.2],
'prob_virus_kill_host_gamma_loc': [0],
'prob_virus_kill_host_gamma_magnitude_multiplier': [0.069],
'prob_infectious_no_to_mild_symptom_gamma_shape': [4.1],
'prob_infectious_no_to_mild_symptom_gamma_scale': [1],
'prob_infectious_no_to_mild_symptom_gamma_loc': [0],
'prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier': [0.75],
'prob_infectious_no_to_severe_symptom_gamma_shape': [1],
'prob_infectious_no_to_severe_symptom_gamma_scale': [2],
'prob_infectious_no_to_severe_symptom_gamma_loc': [0],
'prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier': [0.1],
'prob_infectious_no_to_critical_symptom_gamma_shape': [1],
'prob_infectious_no_to_critical_symptom_gamma_scale': [2.8],
'prob_infectious_no_to_critical_symptom_gamma_loc': [0],
'prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier': [0.15],
'prob_infectious_mild_to_no_symptom_gamma_shape': [3],
'prob_infectious_mild_to_no_symptom_gamma_scale': [3],
'prob_infectious_mild_to_no_symptom_gamma_loc': [0],
'prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier': [0.25],
'prob_infectious_mild_to_severe_symptom_gamma_shape': [4.9],
'prob_infectious_mild_to_severe_symptom_gamma_scale': [2.2],
'prob_infectious_mild_to_severe_symptom_gamma_loc': [0],
'prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier': [0.11],
'prob_infectious_mild_to_critical_symptom_gamma_shape': [3.3],
'prob_infectious_mild_to_critical_symptom_gamma_scale': [3.1],
'prob_infectious_mild_to_critical_symptom_gamma_loc': [0],
'prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier': [0.11],
'prob_infectious_severe_to_no_symptom_gamma_shape': [3],
'prob_infectious_severe_to_no_symptom_gamma_scale': [2],
'prob_infectious_severe_to_no_symptom_gamma_loc': [0],
'prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_severe_to_mild_symptom_gamma_shape': [5],
'prob_infectious_severe_to_mild_symptom_gamma_scale': [3],
'prob_infectious_severe_to_mild_symptom_gamma_loc': [0],
'prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_severe_to_critical_symptom_gamma_shape': [7],
'prob_infectious_severe_to_critical_symptom_gamma_scale': [3],
'prob_infectious_severe_to_critical_symptom_gamma_loc': [0],
'prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier': [0.01],
'prob_infectious_critical_to_no_symptom_gamma_shape': [7],
'prob_infectious_critical_to_no_symptom_gamma_scale': [1],
'prob_infectious_critical_to_no_symptom_gamma_loc': [0],
'prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_critical_to_mild_symptom_gamma_shape': [4],
'prob_infectious_critical_to_mild_symptom_gamma_scale': [2],
'prob_infectious_critical_to_mild_symptom_gamma_loc': [0],
'prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_critical_to_severe_symptom_gamma_shape': [5],
'prob_infectious_critical_to_severe_symptom_gamma_scale': [2],
'prob_infectious_critical_to_severe_symptom_gamma_loc': [0],
'prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier': [0.25],
'prob_recovered_no_to_mild_complication': [0.016],
'prob_recovered_no_to_severe_complication': [0],
'prob_recovered_mild_to_no_complication': [0.02],
'prob_recovered_mild_to_severe_complication': [0.02],
'prob_recovered_severe_to_no_complication': [0.001],
'prob_recovered_severe_to_mild_complication': [0.001],
'prob_gain_immunity': [0.005],
'hospital_bed_capacity_as_percent_of_population': [0.10],
'hospital_bed_cost_per_day': [2000],
'icu_bed_capacity_as_percent_of_population': [0.10],
'icu_bed_cost_per_day': [3000],
'ventilator_capacity_as_percent_of_population': [0.1],
'ventilator_cost_per_day': [100],
'drugX_capacity_as_percent_of_population': [0.1],
'drugX_cost_per_day': [20],
}
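# Every parameter above is a one-element list, so the batch runner explores a
# single configuration; adding more values per key would grid-search alternatives.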
start_date = datetime.datetime(2020, 2, 20) # Setting
num_iterations = 1 # Setting
num_max_steps_in_reality = 95 # Setting
num_max_steps_in_simulation = 165 # Setting
end_date_in_reality = start_date + datetime.timedelta(days=num_max_steps_in_reality) # 2020-05-25
end_date_in_simulation = start_date + datetime.timedelta(days=num_max_steps_in_simulation) # 2020-09-22 if num_max_steps_in_simulation == 215
try:
br = BatchRunnerMP(BatchHostNetwork,
br_params,
iterations=num_iterations,
max_steps=num_max_steps_in_simulation,
model_reporters={'Data Collector': lambda m: m.datacollector})
except Exception as e:
print('Multiprocessing batch run not applied; falling back to a single process. Reason:', e)
br = CustomBatchRunner(BatchHostNetwork,
br_params,
iterations=num_iterations,
max_steps=num_max_steps_in_simulation,
model_reporters={'Data Collector': lambda m: m.datacollector})
def main(on_switch=False, graph_switch=False, stats_test_switch=False, save_switch=False,
realworld_prediction_switch=False, filename_tag=''):
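# Each switch gates one stage: on_switch runs the batch at all, graph_switch
# plots, stats_test_switch runs the Granger/Pearson validation,
# realworld_prediction_switch extrapolates the real-world counts, and
# save_switch writes the per-step CSVs.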
if on_switch:
br.run_all()
br_df = br.get_model_vars_dataframe()
br_step_data = pd.DataFrame()
for i in range(len(br_df['Data Collector'])):
if isinstance(br_df['Data Collector'][i], DataCollector):
print('>>>>> Run #{}'.format(i))
i_run_data = br_df['Data Collector'][i].get_model_vars_dataframe()
i_run_data['Date'] = i_run_data.apply(lambda row: convert_time_to_date(row, 'Time', start_date), axis=1)
br_step_data = br_step_data.append(i_run_data, ignore_index=True)
model_param = i_run_data['Model params'][0]
df_real = prepare_realworld_data().copy()
df_real['date_formatted'] = pd.to_datetime(df_real['date_formatted'])
df_real.sort_values(by=['date_formatted'])
df_sim = i_run_data.copy()
df_sim['Date'] = pd.to_datetime(df_sim['Date'])
df_sim.sort_values(by=['Date'])
df_merged = pd.merge(df_real, df_sim, how='outer', left_on=['date_formatted'],
right_on=['Date'])
if graph_switch:
print('>> For graphs')
print('Model param:', model_param)
graphing(df=df_merged)
if stats_test_switch:
print('>> For statistical tests')
print('Model param:', model_param)
df_merged_sliced = df_merged[(df_merged['date_formatted'] >= start_date)
& (df_merged['date_formatted'] <= end_date_in_reality)]
statistical_test_validation(df=df_merged_sliced)
if realworld_prediction_switch:
print('>> For real-world predictions')
print('Model param:', model_param)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='cumulative_cases',
feature_col='Cumulative test-confirmed infectious'
)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='cumulative_deaths',
feature_col='Cumulative test-confirmed dead'
)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='active_cases',
feature_col='Test-confirmed infectious'
)
br_step_data['File ID'] = filename_tag
if save_switch:
br_step_data.to_csv(os.getcwd() +
'\\project_result\\disease_model_step_data{}_p{}.csv'.format(filename_tag, i),
index=False)
df_merged.to_csv(os.getcwd() +
'\\project_result\\disease_model_merged_data{}_p{}.csv'.format(filename_tag, i),
index=False)
# Helper functions
curr_dir = os.getcwd()
covid19_dir = '\\data\Covid19Canada'
covid19_timeseries_prov_dir = covid19_dir+'\\timeseries_prov'
cases_timeseries_filename = 'cases_timeseries_prov.csv'
mortality_timeseries_filename = 'mortality_timeseries_prov.csv'
overall_timeseries_filename = 'active_timeseries_prov.csv'
testing_timeseries_filename = 'testing_timeseries_prov.csv'
project_result_dir = '\\project_result'
output_real_data_filename = 'realworldCovid19_step_data_processed.csv'
popn_factor = 1000000 # Setting
def convert_time_to_date(row, var, start_date):
current_date = start_date + datetime.timedelta(days=(int(row[var]-1)))
return current_date
def get_realworld_data():
path_overall = curr_dir+covid19_timeseries_prov_dir+'\\'+overall_timeseries_filename
path_testing = curr_dir+covid19_timeseries_prov_dir+'\\'+testing_timeseries_filename
df_overall = pd.read_csv(path_overall, encoding='utf-8', low_memory=False)
df_overall.rename(columns={'date_active': 'date'}, inplace=True)
df_testing = pd.read_csv(path_testing, encoding='utf-8', low_memory=False)
df_testing.rename(columns={'date_testing': 'date'}, inplace=True)
df_merged = pd.merge(df_overall, df_testing, on=['province', 'date'], how='outer')
df_merged['testing'].fillna(0, inplace=True)
df_merged['cumulative_testing'].fillna(0, inplace=True)
del df_merged['testing_info']
return df_merged
def prepare_realworld_data():
df_canada = get_realworld_data().copy()
# Restrict location
prov = 'Alberta'
if prov == 'Alberta':
prov_popn = 4.41 * 1000000 # Source: https://economicdashboard.alberta.ca/Population
df = df_canada[df_canada['province'] == 'Alberta']
# Restrict date range
df['date_formatted'] = pd.to_datetime(df['date'], format='%d-%m-%Y')
df = df[(df['date_formatted'] >= start_date) & (df['date_formatted'] <= end_date_in_reality)]
# Additional calculations
df['total_n'] = prov_popn
df['rate_per_1M_cumulative_test_done'] = df.apply(get_proportion,
numerator='cumulative_testing',
denominator='total_n',
multiplier = popn_factor,
axis=1)
df['rate_per_1M_cumulative_test-confirmed_infectious'] = df.apply(get_proportion,
numerator='cumulative_cases',
denominator='total_n',
multiplier = popn_factor,
axis=1)
df['rate_per_1M_cumulative_test-confirmed_dead'] = df.apply(get_proportion,
numerator='cumulative_deaths',
denominator='total_n',
multiplier = popn_factor,
axis=1)
return df
def graphing(df):
display_vars_for_df_real = ['rate_per_1M_cumulative_test_done',
'rate_per_1M_cumulative_test-confirmed_infectious',
'rate_per_1M_cumulative_test-confirmed_dead',
]
display_vars_for_df_sim = [
'Rate per 1M cumulative test done',
'Rate per 1M cumulative infectious',
'Rate per 1M cumulative test-confirmed infectious',
'Rate per 1M cumulative dead',
'Rate per 1M cumulative test-confirmed dead',
]
for var in display_vars_for_df_real:
sns.lineplot(x='date_formatted', y=var, data=df)
plt.xticks(rotation=15)
plt.title('Title: Real-world '+var)
plt.show()
for var in display_vars_for_df_sim:
sns.lineplot(x='Date', y=var, data=df)
plt.xticks(rotation=15)
plt.title('Title: Simulated '+var)
plt.show()
def statistical_test_validation(df):
maxlag = 10
granger_test = 'ssr_ftest' # options are 'params_ftest', 'ssr_ftest', 'ssr_chi2test', and 'lrtest'
var_pair_p1 = ['rate_per_1M_cumulative_test_done',
'Rate per 1M cumulative test done']
var_pair_p2 = ['rate_per_1M_cumulative_test-confirmed_infectious',
'Rate per 1M cumulative test-confirmed infectious']
var_pair_p3 = ['rate_per_1M_cumulative_test-confirmed_dead',
'Rate per 1M cumulative test-confirmed dead']
granger_test_result_p1 = grangercausalitytests(df[var_pair_p1], maxlag=maxlag, verbose=False)
granger_test_result_p2 = grangercausalitytests(df[var_pair_p2], maxlag=maxlag, verbose=False)
granger_test_result_p3 = grangercausalitytests(df[var_pair_p3], maxlag=maxlag, verbose=False)
granger_p_values_p1 = [round(granger_test_result_p1[i + 1][0][granger_test][1], 4) for i in range(maxlag)]
granger_min_p_value_p1 = np.min(granger_p_values_p1)
granger_max_p_value_p1 = np.max(granger_p_values_p1)
granger_mean_p_value_p1 = np.mean(granger_p_values_p1)
granger_p_values_p2 = [round(granger_test_result_p2[i + 1][0][granger_test][1], 4) for i in range(maxlag)]
granger_min_p_value_p2 = np.min(granger_p_values_p2)
granger_max_p_value_p2 = np.max(granger_p_values_p2)
granger_mean_p_value_p2 = np.mean(granger_p_values_p2)
granger_p_values_p3 = [round(granger_test_result_p3[i + 1][0][granger_test][1], 4) for i in range(maxlag)]
granger_min_p_value_p3 = np.min(granger_p_values_p3)
granger_max_p_value_p3 = np.max(granger_p_values_p3)
granger_mean_p_value_p3 = np.mean(granger_p_values_p3)
print('p-value of {}: min={}, max={}, mean={}'.format(granger_test, granger_min_p_value_p1,
granger_max_p_value_p1, granger_mean_p_value_p1))
print('p-value of {}: min={}, max={}, mean={}'.format(granger_test, granger_min_p_value_p2,
granger_max_p_value_p2, granger_mean_p_value_p2))
print('p-value of {}: min={}, max={}, mean={}'.format(granger_test, granger_min_p_value_p3,
granger_max_p_value_p3, granger_mean_p_value_p3))
pearson_r1, pearson_p1 = stats.pearsonr(df.dropna()[var_pair_p1[0]], df.dropna()[var_pair_p1[1]])
pearson_r2, pearson_p2 = stats.pearsonr(df.dropna()[var_pair_p2[0]], df.dropna()[var_pair_p2[1]])
pearson_r3, pearson_p3 = stats.pearsonr(df.dropna()[var_pair_p3[0]], df.dropna()[var_pair_p3[1]])
print(f'Pearson r: {pearson_r1} and p-value: {pearson_p1}')
print(f'Pearson r: {pearson_r2} and p-value: {pearson_p2}')
print(f'Pearson r: {pearson_r3} and p-value: {pearson_p3}')
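# The three near-identical Granger blocks above could be collapsed into a small
# helper. Illustrative sketch only (it is not called anywhere); it assumes the
# module's existing imports (np, grangercausalitytests) and the statsmodels result
# layout used above: result[lag][0][test_name] -> (statistic, p-value, ...).
def granger_p_value_summary(df, var_pair, maxlag=10, test='ssr_ftest'):
    # run the test for every lag up to maxlag and summarise the p-values
    result = grangercausalitytests(df[var_pair], maxlag=maxlag, verbose=False)
    p_values = [round(result[lag + 1][0][test][1], 4) for lag in range(maxlag)]
    return np.min(p_values), np.max(p_values), np.mean(p_values)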
def predict_by_percent_change_of_another_col(df, predicted_col, feature_col):
pct_chg_col = feature_col+' percent change'
df[pct_chg_col] = (df[feature_col] - df[feature_col].shift(1).fillna(0))/\
df[feature_col].shift(1).fillna(0)
empty_cells = df[predicted_col].isna()
percents = df[pct_chg_col].where(empty_cells, 0) + 1
df[predicted_col] = df[predicted_col].ffill() * percents.cumprod()
return df
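# Note on the ffill() * cumprod() trick above: it assumes the NaNs in predicted_col
# form a trailing block. The last observed value is extrapolated forward by
# compounding the feature column's percent changes; rows that already have data keep
# a multiplier of 1, so values before the gap are left untouched.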
def get_proportion(df, numerator, denominator, multiplier=1):
try:
return (df[numerator]/df[denominator])*multiplier
except ZeroDivisionError:
return math.inf
def pandas_output_setting():
| pd.set_option('display.max_rows', 500) | pandas.set_option |
import os
from hilbertcurve.hilbertcurve import HilbertCurve
from pyqtree import Index
import pickle
import sys
import math
import json
import pandas
from epivizfileserver.parser import BigWig
import struct
class QuadTreeManager(object):
def __init__(self, genome, max_items = 128, base_path = os.getcwd()):
self.file_mapping = {}
self.file_objects = {}
# self.file_chrids = {}
self.genome = genome
self.max_items = max_items
self.base_path = base_path
self.file_counter = 0
def hcoords(self, x, chromLength, dims = 2):
hlevel = math.ceil(math.log2(chromLength)/dims)
# print("hlevel, ", hlevel)
hilbert_curve = HilbertCurve(hlevel, dims)
[x,y] = hilbert_curve.coordinates_from_distance(x)
return x, y, hlevel
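    # Illustrative only (not executed): hcoords maps a 1-D chromosome position onto a
    # 2-D Hilbert curve of order ceil(log2(chromLength)/2). For a hypothetical
    # chromosome of length 2**20 every position lands on a 1024 x 1024 grid:
    #   x, y, hlevel = self.hcoords(123456, 2**20)   # hlevel == 10
    # Nearby genomic positions stay close in (x, y), which is what makes the
    # quadtree built in add_to_index effective for range lookups.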
def get_file_btree(self, file, zoomlvl):
bw = BigWig(file)
bw.getZoomHeader()
tree = bw.getTree(-2)
return tree, bw
def read_node(self, tree, offset, endian="="):
data = tree[offset:offset + 4]
(rIsLeaf, rReserved, rCount) = struct.unpack(endian + "BBH", data)
return {"rIsLeaf": rIsLeaf, "rCount": rCount, "rOffset": offset + 4}
    def traverse_nodes(self, node, zoomlvl = -2, tree = None, result = None, fullIndexOffset = None, endian="="):
        # avoid the mutable-default-argument pitfall: a shared list would keep
        # accumulating leaf records across calls for different files
        if result is None:
            result = []
offset = node.get("rOffset")
if node.get("rIsLeaf"):
for i in range(0, node.get("rCount")):
data = tree[offset + (i * 32) : offset + ( (i+1) * 32 )]
(rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset, rDataSize) = struct.unpack(endian + "IIIIQQ", data)
result.append((rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset, rDataSize))
else:
for i in range(0, node.get("rCount")):
data = tree[offset + (i * 24) : offset + ( (i+1) * 24 )]
(rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset) = struct.unpack(endian + "IIIIQ", data)
# remove index offset since the stored binary starts from 0
diffOffset = fullIndexOffset
childNode = self.read_node(tree, rdataOffset - diffOffset, endian)
self.traverse_nodes(childNode, zoomlvl, result=result, tree = tree,
fullIndexOffset = fullIndexOffset, endian = endian)
return result
def get_leaf_nodes(self, tree, bw, zoomlvl):
findexOffset = bw.header.get("fullIndexOffset")
offset = 48
root = self.read_node(tree, offset, endian = bw.endian)
records = self.traverse_nodes(root, zoomlvl, tree = tree, fullIndexOffset = findexOffset, endian = bw.endian)
df = pandas.DataFrame(records, columns=["rStartChromIx", "rStartBase", "rEndChromIx", "rEndBase",
"rdataOffset", "rDataSize"])
return df
def get_file_chr(self, bw):
bw.getId("chr1")
return bw.chrmIds
def add_to_index(self, file, zoomlvl = -2):
tree, bw = self.get_file_btree(file, zoomlvl)
df = self.get_leaf_nodes(tree, bw, zoomlvl)
chrmTree = self.get_file_chr(bw)
self.file_mapping[file] = self.file_counter
self.file_counter += 1
self.file_objects[file] = bw
for chrm in chrmTree.keys():
chromLength = self.genome[chrm]
dims = 2
hlevel = math.ceil(math.log2(chromLength)/dims)
# print("hlevel", hlevel)
x_y_dim = math.ceil(math.pow(2, hlevel))
# print("max x|y =", x_y_dim)
            tree = Index(bbox=(0, 0, x_y_dim, x_y_dim), disk = self.base_path + "quadtree." + chrm + ".index")
            chrmId = chrmTree[chrm]
            # restrict to this chromosome's records without overwriting df,
            # so later iterations still see the full leaf-node table
            chrm_df = df[df["rStartChromIx"] == chrmId]
            # print("\t df shape - ", chrm_df.shape)
            for i, row in chrm_df.iterrows():
                x, y, _ = self.hcoords(row["rStartBase"], chromLength)
                tree.insert((row["rStartBase"], row["rEndBase"], row["rdataOffset"], row["rDataSize"], self.file_mapping[file]), (x, y, x+1, y+1))
def query(self, file, chr, start, end, zoomlvl = -2):
chromLength = self.genome[chr]
dims = 2
hlevel = math.ceil(math.log2(chromLength)/dims)
# print("hlevel", hlevel)
x_y_dim = math.ceil(math.pow(2, hlevel))
# print("max x|y =", x_y_dim)
        tree = Index(bbox=(0, 0, x_y_dim, x_y_dim), disk = self.base_path + "quadtree." + chr + ".index")
        xstart, ystart, _ = self.hcoords(start, chromLength)
        xend, yend, _ = self.hcoords(end, chromLength)
overlapbbox = (start - 1, start - 1, end + 1, end + 1)
matches = tree.intersect(overlapbbox)
df = | pandas.DataFrame(matches, columns=["start", "end", "offset", "size", "fileid"]) | pandas.DataFrame |
from snsql import *
import pandas as pd
import numpy as np
privacy = Privacy(epsilon=3.0, delta=0.1)
class TestPreAggregatedSuccess:
# Test input checks for pre_aggregated
def test_list_success(self, test_databases):
# pass in properly formatted list
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
res = priv.execute(query, pre_aggregated=pre_aggregated)
assert(str(res[1][0]) == '1') # it's sorted
def test_pandas_success(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
pre_aggregated.columns = colnames
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute(query, pre_aggregated=pre_aggregated)
assert(str(res[1][0]) == '1') # it's sorted
def test_pandas_success_df(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
pre_aggregated.columns = colnames
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute_df(query, pre_aggregated=pre_aggregated)
assert(str(res['sex'][0]) == '1') # it's sorted
def test_np_ndarray_success(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
pre_aggregated = | pd.DataFrame(data=pre_aggregated[1:], index=None) | pandas.DataFrame |
import datasets
import training
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import time
from itertools import combinations
def run_linear_regression_model(train_query_x, test_query_x, train_y, test_y, real_x, real_y):
start_time = time.time()
reg_map = LinearRegression().fit(train_query_x, train_y)
end_time = time.time()
duration = end_time - start_time
print("--- Trained in %s seconds ---" % duration)
pred_y = reg_map.predict(test_query_x)
diff = pred_y.flatten() - test_y
percent_diff = (diff / test_y)
abs_percent_diff = np.abs(percent_diff)
synthetic_mean = np.mean(abs_percent_diff)
synthetic_std = np.std(abs_percent_diff)
print ('mean = {}, std = {}'.format(synthetic_mean, synthetic_std))
pred_y = reg_map.predict(real_x)
diff = pred_y.flatten() - real_y
    percent_diff = (diff / real_y)
abs_percent_diff = np.abs(percent_diff)
real_mean = np.mean(abs_percent_diff)
real_std = np.std(abs_percent_diff)
print ('mean = {}, std = {}'.format(real_mean, real_std))
return duration, synthetic_mean, synthetic_std, real_mean, real_std
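# Hedged sketch of a helper for the repeated error summaries above (not wired into
# run_linear_regression_model; the name is illustrative). It returns the mean and
# standard deviation of the absolute percentage error of predictions vs. ground truth.
def absolute_percent_error_summary(pred_y, true_y):
    percent_diff = (pred_y.flatten() - true_y) / true_y
    abs_percent_diff = np.abs(percent_diff)
    return np.mean(abs_percent_diff), np.std(abs_percent_diff)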
def main():
print ("Train and test a prediction model for SE problem using baseline model")
output_f = open('data/output_accuracy_prediction_baseline.csv', 'w')
query_attributes_files_list = []
distributions = ['uniform', 'diagonal', 'gauss', 'parcel', 'combo']
for r in range(1, len(distributions) + 1):
groups = combinations(distributions, r)
for g in groups:
query_attributes_files = []
for dist in g:
query_attributes_files.append('data/query_accuracies_{}.csv'.format(dist))
query_attributes_files_list.append(query_attributes_files)
query_attributes_all = datasets.load_query_attributes_from_multiple_files(['data/query_accuracies_all.csv'])
train_query_attributes_all, test_query_attributes_all = train_test_split(
query_attributes_all, test_size=0.25, random_state=42)
for query_attributes_files in query_attributes_files_list:
query_attributes = datasets.load_query_attributes_from_multiple_files(query_attributes_files)
train_query_attributes, test_query_attributes = train_test_split(
query_attributes, test_size=0.25, random_state=42)
real_query_attributes = datasets.load_query_attributes('data/lakes_query_accuracies.csv')
# train_query_x = pd.DataFrame.to_numpy(train_query_attributes[['sampling_budget', 'query_ratio']])
# test_query_x = pd.DataFrame.to_numpy(test_query_attributes_all[['sampling_budget', 'query_ratio']])
# train_y = train_query_attributes['mean_accuracy']
# test_y = test_query_attributes_all['mean_accuracy']
# real_x = pd.DataFrame.to_numpy(real_query_attributes[['sampling_budget', 'query_ratio']])
# real_y = real_query_attributes['mean_accuracy']
train_query_x = pd.DataFrame.to_numpy(train_query_attributes[['query_ratio', 'mean_accuracy']])
test_query_x = pd.DataFrame.to_numpy(test_query_attributes_all[['query_ratio', 'mean_accuracy']])
train_y = train_query_attributes['sampling_budget']
test_y = test_query_attributes_all['sampling_budget']
real_x = | pd.DataFrame.to_numpy(real_query_attributes[['query_ratio', 'mean_accuracy']]) | pandas.DataFrame.to_numpy |
import numpy as np
import pandas as pd
from classifier2 import what1, what
import matplotlib.pyplot as plt
"""
dataframe that provides very good info about individual classes!
"""
#v = what['value(in%)'].cumsum()
what2 = what1.join(what['value(in%)'])
what3 = what2.sort_values(by='value(in%)',ascending= False)
what4 = what3.set_index('SKU',drop=True,inplace=False)
what5 = pd.DataFrame(what4)
what6 = pd.concat([what5['value(in%)']]).cumsum() #.join(what5['Name'], what5['Quantity'], what5['Unit_cost'])
what7 = pd.concat([what6, what5['Quantity'], what5['Unit_cost'], what5['Category'], what5['SubCategory']], axis=1)
#print(what7)
classAA = what7[(what7['value(in%)'] <= 40)]
classA = what7[(what7['value(in%)'] > 40) & (what7['value(in%)']<80)]
classB = what7[(what7['value(in%)'] > 80) & (what7['value(in%)']<90)]
classC = what7[(what7['value(in%)'] > 90) & (what7['value(in%)'] <99)]
classD = what7[(what7['value(in%)'] > 99) & (what7['value(in%)'] <102)]
classAA = | pd.DataFrame(classAA) | pandas.DataFrame |
import argparse
import os
import warnings
import boto3, time, json, warnings, os, re
import urllib.request
from datetime import date, timedelta
import numpy as np
import pandas as pd
import geopandas as gpd
from multiprocessing import Pool
# the train test split date is used to split each time series into train and test sets
train_test_split_date = date.today() - timedelta(days = 30)
# the sampling frequency determines the number of hours per sample
# and is used for aggregating and filling missing values
frequency = '1'
warnings.filterwarnings('ignore')
def athena_create_table(bucket_name, query_file, wait=None):
create_table_uri = athena_execute(bucket_name, query_file, 'txt', wait)
return create_table_uri
def athena_query_table(bucket_name, query_file, wait=None):
results_uri = athena_execute(bucket_name, query_file, 'csv', wait)
return results_uri
def athena_execute(bucket_name, query_file, ext, wait):
with open(query_file) as f:
query_str = f.read()
athena = boto3.client('athena')
s3_dest = f's3://{bucket_name}/athena/results/'
query_id = athena.start_query_execution(
QueryString= query_str,
ResultConfiguration={'OutputLocation': s3_dest}
)['QueryExecutionId']
results_uri = f'{s3_dest}{query_id}.{ext}'
start = time.time()
    while wait is None or wait == 0 or time.time() - start < wait:
result = athena.get_query_execution(QueryExecutionId=query_id)
status = result['QueryExecution']['Status']['State']
if wait == 0 or status == 'SUCCEEDED':
break
        elif status in ['QUEUED','RUNNING']:
            pass  # still in progress; fall through to the sleep below instead of busy-polling
else:
raise Exception(f'query {query_id} failed with status {status}')
time.sleep(3)
return results_uri
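# Illustrative usage of the two Athena helpers above (bucket and file names are
# placeholders, not part of this project):
#   create_uri  = athena_create_table('my-analytics-bucket', 'sql/create_openaq.ddl', wait=300)
#   results_uri = athena_query_table('my-analytics-bucket', 'sql/sydney.dml', wait=300)
# Both return the S3 URI of the Athena output object (.txt and .csv respectively).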
def map_s3_bucket_path(s3_uri):
pattern = re.compile('^s3://([^/]+)/(.*?([^/]+)/?)$')
value = pattern.match(s3_uri)
return value.group(1), value.group(2)
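# Example of what map_s3_bucket_path returns (illustrative URI):
#   map_s3_bucket_path('s3://my-bucket/athena/results/abc123.csv')
#   -> ('my-bucket', 'athena/results/abc123.csv')
# The regex's third capture group (just the file name) is not used by callers.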
def get_sydney_openaq_data(bucket_name, sql_query_file_path = "/opt/ml/processing/sql/sydney.dml"):
query_results_uri = athena_query_table(bucket_name, sql_query_file_path)
print (f'reading {query_results_uri}')
bucket_name, key = map_s3_bucket_path(query_results_uri)
print(f'bucket: {bucket_name}; with key: {key}')
local_result_file = 'result.csv'
s3 = boto3.resource('s3')
s3.Bucket(bucket_name).download_file(key, local_result_file)
raw = pd.read_csv(local_result_file, parse_dates=['timestamp'])
return raw
def featurize(raw):
def fill_missing_hours(df):
df = df.reset_index(level=categorical_levels, drop=True)
index = pd.date_range(df.index.min(), df.index.max(), freq='1H')
return df.reindex(pd.Index(index, name='timestamp'))
# Sort and index by location and time
categorical_levels = ['country', 'city', 'location', 'parameter']
index_levels = categorical_levels + ['timestamp']
indexed = raw.sort_values(index_levels, ascending=True)
indexed = indexed.set_index(index_levels)
# indexed.head()
# Downsample to hourly samples by maximum value
downsampled = indexed.groupby(categorical_levels + [pd.Grouper(level='timestamp', freq='1H')]).max()
# Back fill missing values
filled = downsampled.groupby(level=categorical_levels).apply(fill_missing_hours)
#filled[filled['value'].isnull()].groupby('location').count().describe()
filled['value'] = filled['value'].interpolate().round(2)
filled['point_latitude'] = filled['point_latitude'].fillna(method='pad')
filled['point_longitude'] = filled['point_longitude'].fillna(method='pad')
# Create Features
aggregated = filled.reset_index(level=4)\
.groupby(level=categorical_levels)\
.agg(dict(timestamp='first', value=list, point_latitude='first', point_longitude='first'))\
.rename(columns=dict(timestamp='start', value='target'))
aggregated['id'] = np.arange(len(aggregated))
aggregated.reset_index(inplace=True)
aggregated.set_index(['id']+categorical_levels, inplace=True)
# Add Categorical features
level_ids = [level+'_id' for level in categorical_levels]
for l in level_ids:
aggregated[l], index = pd.factorize(aggregated.index.get_level_values(l[:-3]))
aggregated['cat'] = aggregated.apply(lambda columns: [columns[l] for l in level_ids], axis=1)
features = aggregated.drop(columns=level_ids+ ['point_longitude', 'point_latitude'])
features.reset_index(level=categorical_levels, inplace=True, drop=True)
return features
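# After featurize, each row describes one (country, city, location, parameter) series
# in a DeepAR-style layout, roughly (illustrative values only):
#   start                 target               cat
#   2019-01-01 00:00:00   [12.0, 13.5, ...]    [0, 3, 17, 2]
# where 'target' is the hourly value series and 'cat' holds the factorized ids of the
# four categorical levels.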
def filter_dates(df, min_time, max_time, frequency):
min_time = None if min_time is None else pd.to_datetime(min_time)
max_time = None if max_time is None else | pd.to_datetime(max_time) | pandas.to_datetime |
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
# the holiday moves the roll schedule up one day, since Friday is
# excluded as a day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_static_bad_transitions(dates):
contract_dates = dates.iloc[[0]]
ts = dates.iloc[0] + BDay(-8)
# transition does not contain 'front' column
cols = pd.MultiIndex.from_product([[0], ['not_front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition does not sum to one across rows
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition is not monotonic increasing in back
transition = pd.DataFrame([[0.7, 0.3], [0.8, 0.2], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
def test_no_roll_date_two_generics_static_transition(dates):
dt = dates.iloc[0]
contract_dates = dates
ts = dt + BDay(-8)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_two_generics_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:3]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition,
transition=transition)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[1.0, 0.0], [0.0, 1.0],
[1.0, 0.0], [0.0, 1.0]], index=midx,
columns=cols)
assert_frame_equal(wts, wts_exp)
def test_during_roll_two_generics_one_day_static_transition(dates):
contract_dates = dates
ts = dates.iloc[0] + BDay(-1)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 0.5, ts), (0, 'CLZ16', 0.5, ts),
(1, 'CLZ16', 0.5, ts), (1, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_invalid_contract_dates():
ts = [pd.Timestamp("2016-10-19")]
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
non_unique_index = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-11-21')],
index=['instr1', 'instr1'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_index, mappings.static_transition,
transition=trans)
non_unique_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-20')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_vals, mappings.static_transition,
transition=trans)
non_monotonic_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-19')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.static_transition(ts[0], non_monotonic_vals, transition=trans)
not_enough_vals = pd.Series([pd.Timestamp('2016-10-19')],
index=['instr1'])
with pytest.raises(IndexError):
mappings.static_transition(ts[0], not_enough_vals, transition=trans)
def test_during_roll_two_generics_one_day_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates
timestamps = pd.DatetimeIndex([dt + BDay(-1)])
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
midx = pd.MultiIndex.from_product([timestamps,
['CLF17', 'CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[0, 0.5], [0.5, 0], [0.5, 0.5]],
index=midx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_whole_roll_roll_two_generics_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates
timestamps = pd.DatetimeIndex([dt + BDay(-2), dt + BDay(-1), dt])
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1, 0, 1, 0], [0.5, 0.5, 0.5, 0.5],
[0, 1, 0, 1]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
midx = pd.MultiIndex.from_tuples([(timestamps[0], 'CLX16'),
(timestamps[0], 'CLZ16'),
(timestamps[1], 'CLF17'),
(timestamps[1], 'CLX16'),
(timestamps[1], 'CLZ16'),
(timestamps[2], 'CLF17'),
(timestamps[2], 'CLZ16')])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[1, 0], [0, 1], [0, 0.5], [0.5, 0], [0.5, 0.5],
[0, 1], [1, 0]],
index=midx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_roll_to_roll_two_generics():
contract_dates = pd.Series(
[TS('2016-10-10'), TS('2016-10-13'), TS('2016-10-17'), TS('2016-10-20')],
index=['A', 'B', 'C', 'D']
)
timestamps = pd.date_range(contract_dates.iloc[0] + BDay(-2),
contract_dates.iloc[1], freq='b')
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1, 0, 1, 0], [0.5, 0.5, 0.5, 0.5],
[0, 1, 0, 1]], index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
midx = pd.MultiIndex.from_tuples([(timestamps[0], 'A'),
(timestamps[0], 'B'),
(timestamps[1], 'A'),
(timestamps[1], 'B'),
(timestamps[1], 'C'),
(timestamps[2], 'B'),
(timestamps[2], 'C'),
(timestamps[3], 'B'),
(timestamps[3], 'C'),
(timestamps[4], 'B'),
(timestamps[4], 'C'),
(timestamps[4], 'D'),
(timestamps[5], 'C'),
(timestamps[5], 'D')])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1],
[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
wts_exp = pd.DataFrame(vals, index=midx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_to_generics_two_generics_exact_soln():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=[0, 1])
instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([20.0, 20.0], index=[0, 1])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_exact_soln_negative():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=[0, 1])
instrs = pd.Series([10, 0, -10], index=["CLX16", "CLZ16", "CLF17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([20.0, -20.0], index=[0, 1])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_zero_generics_weight():
# scenario where one generic has 0 weight, tests for bug where result
# has epsilon weight on CL1
wts = pd.DataFrame([[0, 1]], index=["CLZ16"], columns=["CL1", "CL2"])
notional = pd.Series([-13900.0], index=["CLZ16"])
generics = mappings.to_generics(notional, wts)
exp_generics = pd.Series([-13900.0], index=["CL2"])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_minimize_error_non_integer_soln():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=[0, 1])
instrs = | pd.Series([10, 20, 11], index=["CLX16", "CLZ16", "CLF17"]) | pandas.Series |
import time
import copy
from lxml import html
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from selenium import webdriver
def get_benzinga_data(stock, days_to_look_back):
ffox_options = webdriver.FirefoxOptions()
minimum_date = pd.Timestamp(datetime.utcnow(), tz = 'UTC') - pd.Timedelta('{} days'.format(days_to_look_back))
ffox_options.set_headless()
tol_amount = 5
tols_curr = 0
bp = False
ff = webdriver.Firefox(options = ffox_options)
try:
ff.get('https://benzinga.com/stock/{}'.format(stock.lower()))
time.sleep(5)
analyst_ratings = []
current_index = 1
while True:
try:
elem = ff.find_element_by_xpath('/html/body/div[6]/div/div[2]/div[2]/div[1]/div/div[7]/div/div/div[1]/a/span[1]')
ff.execute_script("arguments[0].scrollIntoView();", elem)
time.sleep(0.3)
elem.click()
time.sleep(2)
while True:
try:
header = '/html/body/div[6]/div/div[2]/div[2]/div[1]/div/div[7]/div/div/div[1]/ul/li[{}]/a'.format(current_index)
headline = ff.find_element_by_xpath(header).text
url = ff.find_element_by_xpath(header).get_attribute('href')
publisher = ff.find_element_by_xpath('/html/body/div[6]/div/div[2]/div[2]/div[1]/div/div[7]/div/div/div[1]/ul/li[{}]/span[1]'.format(current_index)).text
date = ff.find_element_by_xpath('/html/body/div[6]/div/div[2]/div[2]/div[1]/div/div[7]/div/div/div[1]/ul/li[{}]/span[2]'.format(current_index)).text
# print(date)
# print([headline, date])
if ('-0400' not in date) & ('-0500' not in date):
# # print(date)
if 'ago' in date:
if date == 'a day ago':
pass
else:
# # print(date)
timeperiod = date[::-1]
timeperiod = timeperiod[timeperiod.find(' ') + 1:][::-1]
timeperiod = | pd.Timedelta(timeperiod) | pandas.Timedelta |
"""
This module merges temperature, humidity, and influenza data together
"""
import pandas as pd
import ast
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/caominhduy/TH-Flu-Modulation'
__version__ = '1.0.0'
def merge_flu(path='data/epidemiology/processed_CDC_2008_2021.csv'):
df = pd.read_csv(path, low_memory=False)
df['week'] = df['week'].astype('int')
df['year'] = df['year'].astype('int')
cols = ['state', 'week', 'year', 'level']
df = df.reindex(columns=cols)
return df
def merge_weather():
with open('data/geodata/state_abbr.txt', 'r') as f:
contents = f.read()
state_abbr_dict = ast.literal_eval(contents)
states = list(state_abbr_dict.values())
df_temp = pd.DataFrame(columns=['week', 'temp', 'state', 'year'])
df_humid = pd.DataFrame(columns=['week', 'humid', 'state', 'year'])
for year in list(range(2008, 2020)):
y = str(year)
df = pd.read_csv('data/weather/' + y + '-temp.csv')
temps = df[states[0]]
weeks = df['week']
snames = pd.Series(states)
snames = snames.repeat(len(weeks)).reset_index(drop=True)
for s in states[1:]:
temps = temps.append(df[s]).reset_index(drop=True)
weeks = weeks.append(df['week']).reset_index(drop=True)
frames = {'week': weeks, 'temp': temps, 'state': snames}
df2 = pd.DataFrame(frames)
df2['year'] = y
df_temp = df_temp.append(df2)
for year in list(range(2008, 2020)):
y = str(year)
df = pd.read_csv('data/weather/' + y + '-humid.csv')
humids = df[states[0]]
weeks = df['week']
snames = pd.Series(states)
snames = snames.repeat(len(weeks)).reset_index(drop=True)
for s in states[1:]:
humids = humids.append(df[s]).reset_index(drop=True)
weeks = weeks.append(df['week']).reset_index(drop=True)
frames = {'week': weeks, 'humid': humids, 'state': snames}
df2 = | pd.DataFrame(frames) | pandas.DataFrame |
#!/usr/bin/python2
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
import pandas as pd
from sklearn.model_selection import train_test_split
RANDOM_STATE = 3
all_cols = 'linenum text id subreddit meta time author ups downs authorlinkkarma authorkarma authorisgold'.split()
use_cols = 'text subreddit'.split()
all_files = 'entertainment_anime.csv entertainment_comicbooks.csv gaming_dota2.csv gaming_leagueoflegends.csv news_conservative.csv news_libertarian.csv learning_askscience.csv learning_explainlikeimfive.csv television_gameofthrones.csv television_thewalkingdead.csv'.split()
malformed_files = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
def open_with_pandas_read_csv(filename):
if malformed_files[all_files.index(filename[7:])] == 1:
df = pd.read_csv(filename, header=0, usecols=use_cols, names=['linenum'] + all_cols, skiprows=1)
else:
df = pd.read_csv(filename, header=0, usecols=use_cols, names=all_cols)
return df
def clean_data(df):
df = df.dropna()
df = df.drop_duplicates()
def remove_links(text):
# remove word after each word in banned_prewords
banned_prewords = "http https v".split()
words = text.split()
to_delete = []
for i, word in enumerate(words):
if word in banned_prewords:
if i + 1 < len(words):
to_delete.append(i + 1)
for i in reversed(to_delete):
del words[i]
text = " ".join(words)
return text
df['text'] = df['text'].apply(remove_links)
df = df[df['text'] != '']
return df
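# Note on remove_links above (illustrative): the trigger word itself is kept and only
# the token that follows it is dropped, e.g.
#   "see http example.com for details" -> "see http for details"
# so bare URL/id tokens after "http", "https" or "v" are stripped from the text.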
def file_to_dataframe(filename):
df = open_with_pandas_read_csv(filename)
df = clean_data(df)
return df
def main():
print("Reading all files")
frames = [file_to_dataframe('../res/' + filename) for filename in all_files]
all_data = pd.concat(frames)
all_data.to_csv('../res/data_all.csv')
print("Creating small sample for testing purposes")
small_sample = pd.concat([df.sample(n=100, random_state=RANDOM_STATE) for df in frames])
small_sample.to_csv('../res/data_sample.csv')
print("splitting all data into train & test sets")
train_test_splits = [train_test_split(df, test_size=0.2, random_state=RANDOM_STATE) for df in frames]
training_and_validation = [train for (train, test) in train_test_splits]
print("splitting training set into train & validation sets")
validation_splits = [train_test_split(df, test_size=1.0 / 8, random_state=RANDOM_STATE) for df in training_and_validation]
training = [train for (train, valid) in validation_splits]
validation = [valid for (train, valid) in validation_splits]
training = | pd.concat(training) | pandas.concat |
from typing import Dict, List, Tuple, Union
import geopandas
import numpy as np
import pandas as pd
from .matches import iter_matches
from .static import ADMINISTRATIVE_DIVISIONS, POSTCODE_MUNICIPALITY_LOOKUP
from .static import df as STATIC_DF
INDEX_COLS = ["municipality", "postcode", "street_nominative", "house_nr"]
def is_valid_idx(x: Tuple[str, str, str, str]):
if not x[0] and not x[1] and not x[2]:
return False
return True
def merge_tuples(
sq: Tuple[
Union[str, slice], Union[str, slice], Union[str, slice], Union[str, slice]
],
res: pd.MultiIndex,
) -> Tuple[str, str, str, str]:
"""Replace tuple values where the index is an empty slice.
Behaviour change in pandas 1.4, in previous versions the full index was returned.
Post 1.4, pandas returns only the missing levels.
:param sq: query tuple
:type sq: Tuple[ Union[str, slice], Union[str, slice], Union[str, slice], Union[str, slice] ]
:param res: index part
:type res: Tuple
:return: Full lookup value
:rtype: Tuple[str, str, str, str]
"""
out = list(sq)
for n in res.names:
idx = INDEX_COLS.index(n)
out[idx] = res.get_level_values(n)[0]
return tuple(out)
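# Illustrative example of merge_tuples (values are made up): with
#   sq  = (slice(None), '101', 'Laugavegur', '26')
#   res = a MultiIndex whose only level is 'municipality', holding 'Reykjavík'
# the empty slice is filled in from res and the full key
#   ('Reykjavík', '101', 'Laugavegur', '26')
# is returned.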
def _build_municipality_street_to_postcode(
df: pd.DataFrame,
) -> Dict[Tuple[str, str], str]:
"""Builds a lookup table of
(municipality, street) => postcode
    Non-unique matches, i.e. streets spanning more than a single postcode, are dropped.
    :param df: address dataframe with municipality, street_nominative, street_dative and postcode columns
    :type df: pd.DataFrame
    :return: mapping of (municipality, street) to postcode
:rtype: Dict[Tuple[str, str], str]
"""
out = {}
delete_list = []
for t, sn, sd, pc in (
df[["municipality", "street_nominative", "street_dative", "postcode"]]
.drop_duplicates()
.values
):
if (t, sn) in out and out[(t, sn)] != str(pc):
delete_list.append((t, sn))
continue
out[(t, sn)] = str(pc)
out[(t, sd)] = str(pc)
for k in delete_list:
out.pop(k, None)
return out
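# Illustrative behaviour of _build_municipality_street_to_postcode (made-up rows): a
# street that maps to exactly one postcode is kept under both its nominative and
# dative spellings, while a street that appears with two different postcodes in the
# same municipality lands on delete_list and is dropped from the lookup entirely.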
class Lookup:
"""
Utility class for doing reverse geocoding lookups from the dataframe.
How it works:
- The dataframe has a few categorical columns whose code values are used
for constructing a multidimensional search tree.
- When querying, a best-effort approach is used to translate the
input string into a vector to query the tree.
"""
df: pd.DataFrame
town_street_to_postcode: Dict[Tuple[str, str], str]
streets: List[str]
house_nrs: List[str]
postcodes: List[str]
municipalities: List[str]
street_dative: Dict[str, str]
def __init__(self) -> "Lookup":
self.df = STATIC_DF.copy().sort_index()
self.town_street_to_postcode = _build_municipality_street_to_postcode(self.df)
self.streets = self.df.index.levels[2]
self.house_nrs = self.df.index.levels[3]
self.postcodes = self.df.index.levels[1]
self.municipalities = self.df.index.levels[0]
self.street_dative = dict(
self.df[["street_dative", "street_nominative"]]
.reset_index(drop=True)
.values
)
def text_to_vec( # pylint: disable=too-many-branches
self, s: str
) -> Tuple[str, str, str, str]:
"""Builds a tuple out of an address string.
* index 0, category value of the "municipality" category.
* index 1, category value of the "postcode" category.
* index 2, category value of the "street_nominative" category.
* index 3, category value of the "house_nr" category.
:param s: string containing address
:type s: str
:return: Address tuple
:rtype: Tuple[str, str, str, str]
"""
municipality = ""
postcode = ""
street = ""
house_nr = ""
admin_unit = ""
# Exit early if the string is empty
if not s:
return ("", "", "", "")
for w in s.split(" "):
w = w.strip(",.")
if not street and w in self.streets:
street = w
if not house_nr and (w.upper() in self.house_nrs or "-" in w):
house_nr = w
if not postcode and w in self.postcodes and w != house_nr:
postcode = w
municipality = POSTCODE_MUNICIPALITY_LOOKUP.get(int(postcode), "")
if not postcode and not municipality and w in self.municipalities:
municipality = w
if not municipality and w in ADMINISTRATIVE_DIVISIONS:
admin_unit = w
if admin_unit and street:
for tn in ADMINISTRATIVE_DIVISIONS[admin_unit]:
postcode = self.town_street_to_postcode.get((tn, street), "")
if not postcode:
continue
municipality = tn
break
# if we have municipality and street but no postcode, try looking it up
if municipality and street and not postcode:
postcode = self.town_street_to_postcode.get((municipality, street), "")
# Álftanes has a special case
if not postcode and municipality == "Garðabær":
postcode = self.town_street_to_postcode.get(
("Garðabær (Álftanes)", street)
)
if postcode:
municipality = "Garðabær (Álftanes)"
if house_nr and "-" in house_nr:
house_nr = house_nr.split("-")[0]
return (
municipality or "",
postcode or "",
street or "",
(house_nr or "").upper(),
)
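    # Illustrative (not executed) examples of text_to_vec; the exact output depends on
    # the street, postcode and municipality tables loaded from the static data:
    #   self.text_to_vec("Laugavegur 26, 101 Reykjavík")  -> roughly
    #       ("Reykjavík", "101", "Laugavegur", "26")
    #   self.text_to_vec("")                              -> ("", "", "", "")
    # Missing pieces come back as empty strings so callers can query with partial keys
    # (empty levels later become slice(None) in __query_vector_dataframe).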
def __query_vector_dataframe(self, q: pd.DataFrame) -> pd.DataFrame:
"""Given a data frame with index:
[municipality, postcode, street_nominative, house_nr]
and columns "qidx" (query index) and "order", matches exact and
partial matches to the address dataframe.
:param q: query dataframe
:type q: pd.DataFrame
:return: query dataframe with additional address columns
:rtype: pd.DataFrame
"""
# get intersecting indexes
found = self.df.index.intersection(q.index)
idx_names = self.df.index.names
# find indexes in the query dataframe which couldn't be found,
# these could be empty queries or partial matches.
missing = q.index.difference(found).unique()
if len(missing):
# create a set of the found values, the purpose is to have a mutable
# data structure to work with.
found_missing = set(found.values)
# get unique set of missing queries
miss_df = q.loc[missing].drop_duplicates()
# keep track of query idx that have not been found
not_found_qidx = set()
missing_data = []
miss_df = miss_df.loc[miss_df.index.map(is_valid_idx)]
# as the address dataframe is fairly large, constrict the search
# space to the records loosely matching what's being queried for. For
# large datasets, this speeds up querying considerably.
search_selector = [
slice(None) if (i[0] == "" and len(i) == 1) else i
for i in [
i.values.tolist()
for i in miss_df.index.remove_unused_levels().levels
]
]
search_space = self.df.loc[tuple(search_selector), :]
# iterate rows of valid missing indexes
for tvec, row in miss_df.iterrows():
qidx = row["qidx"]
# the index is 4 levels, [municipality, postcode, street, house_nr],
# all of these values are allowed to be an empty string, except at
# this point it is clear that a key with an empty string could not
# be found in the index.
# Replace all empty strings with a None slice and query the address dataframe
sq = tuple((i or slice(None) for i in tvec))
                # NOTE: the author has not found a vectorized approach to querying the
# source dataframe and matching the query index back with the result.
try:
res = search_space.loc[sq]
except KeyError:
continue
# if an exact match could be found, assign it as the value of the query
# dataframe for the given index. In case there are duplicates, check
# if it's already been found
if len(res) == 1:
# mark the returned tuple for addition to the found indexes
res_val = merge_tuples(sq, res.index)
found_missing.add(res_val)
# create a new row for the missing data
missing_data.append(res_val + tuple(row))
# mark old data query index for deletion
not_found_qidx.add(qidx)
# NOTE: here there are multiple matches, theoretically possible to train
# a model which would give higher priority to a generic address determined
# by its frequency over a corpus.
# delete found qidx from the original query frame
q = q[~q["qidx"].isin(not_found_qidx)]
# concat the missing data found with the query data frame
q = pd.concat(
[
q,
pd.DataFrame(
missing_data, columns=idx_names + q.columns.tolist()
).set_index(idx_names),
]
)
# rebuild the multiindex after mutating it
found = pd.MultiIndex.from_tuples(list(found_missing), names=idx_names)
# select indexable records, right join with the query dataframe
# and sort by the original query order.
out = self.df.loc[found].join(q, how="right").sort_values("order")
# fill NaN string values
out[
[
"municipality",
"postcode",
"special_name",
"house_nr",
"street_dative",
"street_nominative",
]
] = out[
[
"municipality",
"postcode",
"special_name",
"house_nr",
"street_dative",
"street_nominative",
]
].fillna(
value=""
)
out.reset_index(level=[0, 1, 2, 3], drop=True, inplace=True)
return out
def query_dataframe(
self,
q: pd.DataFrame,
) -> pd.DataFrame:
"""Queries a data frame containing structued data,
columns [postcode, house_nr, street/street_nominative] are
required, [municipality] is optional.
:param q: query dataframe
:type q: pd.DataFrame
:return: query dataframe with additional address columns
:rtype: pd.DataFrame
"""
cols = q.columns
q["postcode"] = q["postcode"].astype(str)
if "municipality" not in cols:
q["municipality"] = q["postcode"].apply(
lambda pc: POSTCODE_MUNICIPALITY_LOOKUP.get(
int(pc) if pc.isdigit() else -1, ""
)
)
q["house_nr"] = q["house_nr"].astype(str)
if "street" in cols and "street_nominative" not in cols:
q = q.rename(columns={"street": "street_nominative"})
q["street_nominative"] = q["street_nominative"].apply(
lambda v: self.street_dative.get(v, v)
)
q["qidx"] = pd.Categorical(
q[self.df.index.names].apply(
lambda x: "/".join(x.dropna().astype(str).values), axis=1
)
).codes
q["order"] = list(range(len(q)))
q = q.set_index(keys=self.df.index.names)
return self.__query_vector_dataframe(q)
def query( # pylint: disable=too-many-locals
self, text: Union[str, List[str], np.ndarray]
) -> geopandas.GeoDataFrame:
"""Given text input, returns a dataframe with matching addresses
:param text: string containing a single address or an iterator
containing multiple addresses.
:type text: Union[str, List[str], np.ndarray]
:return: Data frame containg addresses
:rtype: geopandas.GeoDataFrame
"""
if isinstance(text, str):
text = [text]
# strip whitespace from text
text = [t.strip() for t in text]
# tokenize strings into a list of tuples,
# [municipality, postcode, street_nominative, house_nr]
vecs = [self.text_to_vec(t) for t in text]
# construct dataframe from parsed results
q = | pd.DataFrame(vecs, columns=self.df.index.names) | pandas.DataFrame |
#python3 LED_inference.py arg1 where arg1 is the dataset -> transformers or 'others'
#if the dataset is 'others', a csv with that name must exist in the 'datasets' folder
from datasets import Dataset
import pandas as pd
import torch
import os
import csv
import sys
import gcc
from transformers import LEDForConditionalGeneration, LEDTokenizer
from datasets import load_dataset, load_metric
BUCKET_NAME = 'transformers-lucia'
#dataset to summarize
source_dataset=sys.argv[1]
num_dataset=int(sys.argv[2])
#Dowload model pre-trained
print("Downloading pre-trained model...")
tokenizer = LEDTokenizer.from_pretrained("allenai/led-large-16384-arxiv")
model = LEDForConditionalGeneration.from_pretrained("allenai/led-large-16384-arxiv").to("cuda").half()
#Function to generate predictions
def generate_answer(batch):
inputs_dict = tokenizer(batch['Text'], padding="max_length", max_length=16384, return_tensors="pt", truncation=True)
input_ids = inputs_dict.input_ids.to("cuda")
attention_mask = inputs_dict.attention_mask.to("cuda")
global_attention_mask = torch.zeros_like(attention_mask)
# put global attention on <s> token
global_attention_mask[:, 0] = 1
predicted_abstract_ids = model.generate(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, max_length=512, num_beams=4)
batch["predicted_abstract"] = tokenizer.batch_decode(predicted_abstract_ids, skip_special_tokens=True)
return batch
#Function to generate csv with results
def generate_csv(source_summary, results, csv_name):
#Create csv with results
createCSV=[["input_text","gold_summary","predicted_summary"]]
for number, element in enumerate(results):
print(element['Text'])
print()
print(source_summary[number])
print()
print(element['predicted_abstract'])
createCSV.append([element['Text'],source_summary[number],element['predicted_abstract']])
with open(csv_name, 'w', newline='') as file:
writer = csv.writer(file,delimiter='|')
writer.writerows(createCSV)
#To evaluate with transformers datasets
if(source_dataset=='transformers'):
#List with the datasets
datasetInfo = [['cnn_dailymail', '3.0.0', 'article', 'highlights'],
['gigaword', '1.2.0', 'document', 'summary'],
['xsum', '1.1.0', 'document', 'summary'],
['biomrc', 'biomrc_large_B', 'abstract','title']]
#Execute all the transformer's datasets
if (num_dataset == -1):
print("Executing all transformer's datasets")
#execute the code for all the datasets
for datasetToSummarize in datasetInfo:
name_dataset=datasetToSummarize[0]
version_dataset=datasetToSummarize[1]
test_dataset_full = load_dataset(name_dataset, version_dataset, split="test")
print("Dataset selected: "+name_dataset)
#Create a dataframe with column name text to process after
text_column_name = datasetToSummarize[2]
text_column = test_dataset_full[text_column_name]
text_column = pd.DataFrame(text_column, columns=["Text"])
test_dataset = Dataset.from_pandas(text_column)
summary_column = datasetToSummarize[3]
#Generate results
results = test_dataset.map(generate_answer, batched=True, batch_size=2)
#Save results
path_results = "./led_inference/summaries/"+name_dataset+".csv"
generate_csv(test_dataset_full[summary_column], results, path_results)
#Move results to bucket and remove from notebook
gcc.upload_blob(BUCKET_NAME,path_results,'tfg/summaries/LED/'+name_dataset+'.csv' )
os.remove(path_results)
#Execute specific huggingface dataset
elif (num_dataset < 4 and num_dataset > -1):
name_dataset = datasetInfo[num_dataset][0]
print("Dataset selected: "+name_dataset)
version_dataset = datasetInfo[num_dataset][1]
test_dataset_full = load_dataset(name_dataset, version_dataset, split="test")
#Create a dataframe with column name text to process after
text_column_name = datasetInfo[num_dataset][2]
text_column = test_dataset_full[text_column_name]
text_column = | pd.DataFrame(text_column, columns=["Text"]) | pandas.DataFrame |
import matplotlib
#matplotlib.use('TkAgg')
from config import *
from plot_utils import *
from shared_utils import *
import pickle as pkl
import numpy as np
from collections import OrderedDict
from matplotlib import pyplot as plt
from pymc3.stats import quantiles
import os
import pandas as pd
from pathlib import Path
# def curves(use_interactions=True, use_report_delay=True, prediction_day=30, save_plot=False):
# Load only one county
def curves(start, county, n_weeks=3, model_i=35, save_plot=False):
with open('../data/counties/counties.pkl', "rb") as f:
counties = pkl.load(f)
start = int(start)
n_weeks = int(n_weeks)
model_i = int(model_i)
# with open('../data/comparison.pkl', "rb") as f:
# best_model = pkl.load(f)
# update to day and new limits!
xlim = (5.5, 15.5)
ylim = (47, 56) # <- 10 weeks
#countyByName = OrderedDict(
# [('Düsseldorf', '05111'), ('Leipzig', '14713'), ('Nürnberg', '09564'), ('München', '09162')])
countyByName = make_county_dict()
# Hier dann das reinspeisen
plot_county_names = {"covid19": [county]}
start_day = pd.Timestamp('2020-01-28') + pd.Timedelta(days=start)
year = str(start_day)[:4]
month = str(start_day)[5:7]
day = str(start_day)[8:10]
# if os.path.exists("../figures/{}_{}_{}/curve_trend_{}.png".format(year, month, day,countyByName[county])):
# return
day_folder_path = "../figures/{}_{}_{}".format(year, month, day)
Path(day_folder_path).mkdir(parents=True, exist_ok=True)
# check for metadata file:
if not os.path.isfile("../figures/{}_{}_{}/metadata.csv".format(year, month, day)):
ids = []
for key in counties:
ids.append(int(key))
df = pd.DataFrame(data=ids, columns=["countyID"])
df["probText"] = ""
df.to_csv("../figures/{}_{}_{}/metadata.csv".format(year, month, day))
# colors for curves
#red
C1 = "#D55E00"
C2 = "#E69F00"
#C3 = "#0073CF"
#green
C4 = "#188500"
C5 = "#29c706"
#C6 = "#0073CF"
# quantiles we want to plot
qs = [0.25, 0.50, 0.75]
fig = plt.figure(figsize=(12, 6))
grid = plt.GridSpec(
1,
1,
top=0.9,
bottom=0.2,
left=0.07,
right=0.97,
hspace=0.25,
wspace=0.15,
)
# for i, disease in enumerate(diseases):
i = 0
disease = "covid19"
prediction_region = "germany"
data = load_daily_data_n_weeks(start, n_weeks, disease, prediction_region, counties)
start_day = pd.Timestamp('2020-01-28') + pd.Timedelta(days=start)
i_start_day = 0
day_0 = start_day + pd.Timedelta(days=n_weeks*7+5)
day_m5 = day_0 - | pd.Timedelta(days=5) | pandas.Timedelta |
# -*- coding: utf-8 -*-
"""
Created on Sun May 8 18:29:53 2016
@author: bmanubay
"""
# Check which molecules we have that appear in Chris's list
import pandas as pd
# read in ; delimited csv of comp/mix counts created in thermomlcnts.py
a0 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/allcomp_counts_all.csv", sep=';')
a1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/allcomp_counts_interesting.csv", sep=';')
a2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/bincomp_counts_all.csv", sep=';')
a3 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/bincomp_counts_interesting.csv", sep=';')
a4 = | pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/mix_counts_all.csv", sep=';') | pandas.read_csv |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os.path
import pkg_resources
import tempfile
import unittest
import numpy as np
import pandas as pd
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn, MetadataFileError)
def get_data_path(filename):
return pkg_resources.resource_filename('qiime2.metadata.tests',
'data/%s' % filename)
# NOTE: many of the test files in the `data` directory intentionally have
# leading/trailing whitespace characters on some lines, as well as mixed usage
# of spaces, tabs, carriage returns, and newlines. When editing these files,
# please make sure your code editor doesn't strip these leading/trailing
# whitespace characters (e.g. Atom does this by default), nor automatically
# modify the files in some other way such as converting Windows-style CRLF
# line terminators to Unix-style newlines.
#
# When committing changes to the files, carefully review the diff to make sure
# unintended changes weren't introduced.
class TestLoadErrors(unittest.TestCase):
def test_path_does_not_exist(self):
with self.assertRaisesRegex(MetadataFileError,
"Metadata file path doesn't exist"):
Metadata.load(
'/qiime2/unit/tests/hopefully/this/path/does/not/exist')
def test_path_is_directory(self):
fp = get_data_path('valid')
with self.assertRaisesRegex(MetadataFileError,
"path points to something other than a "
"file"):
Metadata.load(fp)
def test_non_utf_8_file(self):
fp = get_data_path('invalid/non-utf-8.tsv')
with self.assertRaisesRegex(MetadataFileError,
'encoded as UTF-8 or ASCII'):
Metadata.load(fp)
def test_utf_16_le_file(self):
fp = get_data_path('invalid/simple-utf-16le.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_utf_16_be_file(self):
fp = get_data_path('invalid/simple-utf-16be.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_empty_file(self):
fp = get_data_path('invalid/empty-file')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*file may be empty'):
Metadata.load(fp)
def test_comments_and_empty_rows_only(self):
fp = get_data_path('invalid/comments-and-empty-rows-only.tsv')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*only of comments or empty '
'rows'):
Metadata.load(fp)
def test_header_only(self):
fp = get_data_path('invalid/header-only.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_header_only_with_comments_and_empty_rows(self):
fp = get_data_path(
'invalid/header-only-with-comments-and-empty-rows.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_qiime1_empty_mapping_file(self):
fp = get_data_path('invalid/qiime1-empty.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_invalid_header(self):
fp = get_data_path('invalid/invalid-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'unrecognized ID column name.*'
'invalid_id_header'):
Metadata.load(fp)
def test_empty_id(self):
fp = get_data_path('invalid/empty-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_whitespace_only_id(self):
fp = get_data_path('invalid/whitespace-only-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_empty_column_name(self):
fp = get_data_path('invalid/empty-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_whitespace_only_column_name(self):
fp = get_data_path('invalid/whitespace-only-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_duplicate_ids(self):
fp = get_data_path('invalid/duplicate-ids.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_ids_with_whitespace(self):
fp = get_data_path('invalid/duplicate-ids-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_column_names(self):
fp = get_data_path('invalid/duplicate-column-names.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_duplicate_column_names_with_whitespace(self):
fp = get_data_path(
'invalid/duplicate-column-names-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_id_conflicts_with_id_header(self):
fp = get_data_path('invalid/id-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"ID 'id' conflicts.*ID column header"):
Metadata.load(fp)
def test_column_name_conflicts_with_id_header(self):
fp = get_data_path(
'invalid/column-name-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column name 'featureid' conflicts.*ID "
"column header"):
Metadata.load(fp)
def test_column_types_unrecognized_column_name(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'not_a_column.*column_types.*not a column '
'in the metadata file'):
Metadata.load(fp, column_types={'not_a_column': 'numeric'})
def test_column_types_unrecognized_column_type(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*column_types.*unrecognized column '
'type.*CATEGORICAL'):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'CATEGORICAL'})
def test_column_types_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'categorical',
'col3': 'numeric'})
def test_column_types_override_directive_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple-with-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col3': 'numeric'})
def test_directive_before_header(self):
fp = get_data_path('invalid/directive-before-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'directive.*#q2:types.*searching for '
'header'):
Metadata.load(fp)
def test_unrecognized_directive(self):
fp = get_data_path('invalid/unrecognized-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Unrecognized directive.*#q2:foo.*'
'#q2:types directive is supported'):
Metadata.load(fp)
def test_duplicate_directives(self):
fp = get_data_path('invalid/duplicate-directives.tsv')
with self.assertRaisesRegex(MetadataFileError,
'duplicate directive.*#q2:types'):
Metadata.load(fp)
def test_unrecognized_column_type_in_directive(self):
fp = get_data_path('invalid/unrecognized-column-type.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*unrecognized column type.*foo.*'
'#q2:types directive'):
Metadata.load(fp)
def test_column_types_directive_not_convertible_to_numeric(self):
fp = get_data_path('invalid/types-directive-non-numeric.tsv')
# This error message regex is intentionally verbose because we want to
# assert that many different types of non-numeric strings aren't
# interpreted as numbers. The error message displays a sorted list of
# all values that couldn't be converted to numbers, making it possible
# to test a variety of non-numeric strings in a single test case.
msg = (r"column 'col2' to numeric.*could not be interpreted as "
r"numeric: '\$42', '\+inf', '-inf', '0xAF', '1,000', "
r"'1\.000\.0', '1_000_000', '1e3e4', 'Infinity', 'NA', 'NaN', "
"'a', 'e3', 'foo', 'inf', 'nan', 'sample-1'")
with self.assertRaisesRegex(MetadataFileError, msg):
Metadata.load(fp)
def test_directive_after_directives_section(self):
fp = get_data_path(
'invalid/directive-after-directives-section.tsv')
with self.assertRaisesRegex(MetadataFileError,
'#q2:types.*outside of the directives '
'section'):
Metadata.load(fp)
def test_directive_longer_than_header(self):
fp = get_data_path('invalid/directive-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
def test_data_longer_than_header(self):
fp = get_data_path('invalid/data-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
class TestLoadSuccess(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
# This Metadata object is compared against observed Metadata objects in
# many of the tests, so just define it once here.
self.simple_md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
# Basic sanity check to make sure the columns are ordered and typed as
# expected. It'd be unfortunate to compare observed results to expected
# results that aren't representing what we think they are!
obs_columns = [(name, props.type)
for name, props in self.simple_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def tearDown(self):
self.temp_dir_obj.cleanup()
def test_simple(self):
# Simple metadata file without comments, empty rows, jaggedness,
# missing data, odd IDs or column names, directives, etc. The file has
# multiple column types (numeric, categorical, and something that has
# mixed numbers and strings, which must be interpreted as categorical).
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_bom_simple_txt(self):
# This is the encoding that notepad.exe will use most commonly
fp = get_data_path('valid/BOM-simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_different_file_extension(self):
fp = get_data_path('valid/simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_newline_at_eof(self):
fp = get_data_path('valid/no-newline-at-eof.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_unix_line_endings(self):
fp = get_data_path('valid/unix-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_windows_line_endings(self):
fp = get_data_path('valid/windows-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_mac_line_endings(self):
fp = get_data_path('valid/mac-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_source_artifacts(self):
fp = get_data_path('valid/simple.tsv')
metadata = Metadata.load(fp)
self.assertEqual(metadata.artifacts, ())
def test_retains_column_order(self):
# Explicitly test that the file's column order is retained in the
# Metadata object. Many of the test cases use files with column names
# in alphabetical order (e.g. "col1", "col2", "col3"), which matches
# how pandas orders columns in a DataFrame when supplied with a dict
# (many of the test cases use this feature of the DataFrame
# constructor when constructing the expected DataFrame).
fp = get_data_path('valid/column-order.tsv')
obs_md = Metadata.load(fp)
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_columns = ['z', 'y', 'x']
exp_data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_leading_trailing_whitespace(self):
fp = get_data_path('valid/leading-trailing-whitespace.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_comments(self):
fp = get_data_path('valid/comments.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_empty_rows(self):
fp = get_data_path('valid/empty-rows.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_qiime1_mapping_file(self):
fp = get_data_path('valid/qiime1.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='#SampleID')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_sample_information_file(self):
fp = get_data_path('valid/qiita-sample-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'DESCRIPTION': ['description 1', 'description 2'],
'TITLE': ['A Title', 'Another Title']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_preparation_information_file(self):
fp = get_data_path('valid/qiita-preparation-information.tsv')
obs_md = Metadata.load(fp)
exp_index = | pd.Index(['id.1', 'id.2'], name='sample_name') | pandas.Index |
#!/usr/bin/env python
# coding: utf-8
'''
'''
import time
import pandas as pd
import datarobot as dr
from datarobot.models.modeljob import wait_for_async_model_creation
import numpy as np
import re
import os
from datarobot.errors import JobAlreadyRequested
token_id = ""
ts_setting = {"project_name":"fake_job_posting_210123","filename":"../Data/fake_job_postings.csv", \
"project_id": "60089b3d23aace3eea1810d0","model_id":"", \
"feature_list": "Informative Features","features":[],"set":"validation" , \
"AUC":"Weighted AUC", "LogLoss":"Weighted LogLoss", \
"downsampling": 36,"holdout_pct": 20,"validation_pct":16,"target":"fraudulent" }
parameter_name = ['stop_words','stemmer','num_ngram',"use_idf","pos_tagging"]
value = [1,"porter",[1,2,3,4],1,1]
param_df = pd.DataFrame(list(zip(parameter_name, value)),
columns =['parameter_name', 'value'])
dr.Client(token=token_id, endpoint='https://app.datarobot.com/api/v2')
def check_if_number(st):
    tp = re.search(r"\d+", st)
if tp:
return int(tp.group())
else:
return np.nan
def get_min_max_salary (text):
'''
Get the min and max from the salary_range
:param text: string
:return: the min and max of a salary_range
'''
if type(text) == str:
        if re.search("-", text):
tp = text.split("-")
min_salary = check_if_number(tp[0].strip())
max_salary = check_if_number(tp[1].strip())
return min_salary,max_salary
else:
return np.nan,np.nan
else:
return np.nan, np.nan
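# Quick sanity example (illustrative, not part of the original script):
#   get_min_max_salary("40000-60000")  -> (40000, 60000)
#   get_min_max_salary("competitive")  -> (nan, nan)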
def cleaned_location(text):
'''
    Extract the country, and the combined country-and-state, from the location string
:param text: string with country, state, city
:return:
'''
country_state = ""
st = str(text)
if type(st) is str:
        tp = re.search(r"[a-zA-Z]{2,}\s?,(\s*[a-zA-Z0-9]+|\s)", st)
if tp:
country_state = tp.group().strip()
country = st.strip()[0:2]
else:
return "",""
return country,country_state
else:
return "",""
def create_binary_cat_for_education(text):
if pd.isnull(text) or pd.isna(text):
return "no"
elif text == "unspecified":
return "no"
else:
return "yes"
def PrepareDataSet():
'''
Prepare the dataset for fake_job_postings by adding new features.
:return: enriched original dataset with new features
'''
fake_jobs_df = pd.read_csv(ts_setting["filename"])
    fake_jobs_df["min_salary"] = np.nan
    fake_jobs_df["max_salary"] = np.nan
    fake_jobs_df["salary_diff"] = np.nan
fake_jobs_df["min_salary"],fake_jobs_df["max_salary"] = zip(*fake_jobs_df["salary_range"].apply(get_min_max_salary))
fake_jobs_df["min_salary"] = pd.to_numeric(fake_jobs_df["min_salary"])
fake_jobs_df["max_salary"] = pd.to_numeric(fake_jobs_df["max_salary"])
fake_jobs_df["education_flag"] = [create_binary_cat_for_education(x) for x in fake_jobs_df["required_education"]]
fake_jobs_df["salary_range"] = fake_jobs_df.max_salary - fake_jobs_df.min_salary
fake_jobs_df["salary_diff"] = fake_jobs_df["salary_range"]/fake_jobs_df["min_salary"]
return fake_jobs_df
def start_project_with_settings(fake_jobs_df):
'''
Run a project for fake_jobs_df
:param fake_jobs_df: already enriched dataset
:return: project
'''
global ts_setting
advanced_options = dr.AdvancedOptions(
response_cap=0.7,
blueprint_threshold=2,
smart_downsampled=True, majority_downsampling_rate=ts_setting["downsampling"])
partition = dr.StratifiedTVH(ts_setting["holdout_pct"],ts_setting["validation_pct"], seed=0)
pandas_dataset = dr.Dataset.create_from_in_memory_data(data_frame=fake_jobs_df.drop(columns = ["job_id"]))
project = pandas_dataset.create_project(project_name = ts_setting["project_name"])
project.set_target(target= ts_setting["target"],mode = dr.enums.AUTOPILOT_MODE.QUICK,
partitioning_method=partition,
advanced_options = advanced_options,
worker_count = -1)
project.unlock_holdout()
project.wait_for_autopilot(verbosity=dr.VERBOSITY_LEVEL.SILENT)
return project
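# Minimal usage sketch (assumption: the script's real entry point is not shown in
# this excerpt): enrich the raw data, then launch the Autopilot project on it.
#
#     fake_jobs_df = PrepareDataSet()
#     project = start_project_with_settings(fake_jobs_df)
#     text_features = get_text_features(project)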
'''
From the project, find features, DataRobot set as text features
'''
def get_text_features(project):
'''
get text features
:param project: DataRobot Project
:return: list of features of type text
'''
raw = [feat_list for feat_list in project.get_featurelists()\
if feat_list.name == ts_setting["feature_list"]][0]
text_features = [
feat
for feat in raw.features if dr.Feature.get(project.id, feat).feature_type == "Text"
]
return text_features
# Extract the performance metrics of a single model
def get_1_model_performance(model_p,text_feature,num_modified):
'''
Extract a model metrics
:param model_p: model of interest
:param text_feature: list of features of type text
:param num_modified: number of parameters modified
:return: performance of type dict
'''
global ts_setting
performance = {}
try:
roc = model_p.get_roc_curve(ts_setting["set"])
threshold = roc.get_best_f1_threshold()
metrics = roc.estimate_threshold(threshold)
        performance = {"model_id": model_p.id, "text_feature": text_feature,
                       "AUC": model_p.metrics[ts_setting["AUC"]][ts_setting["set"]],
                       "LogLoss": model_p.metrics[ts_setting["LogLoss"]][ts_setting["set"]],
                       "sample_pct": model_p.sample_pct,
                       'f1_score': metrics['f1_score'],
                       'true_negative_rate': metrics['true_negative_rate'],
                       'false_positive_rate': metrics['false_positive_rate'],
                       'true_positive_rate': metrics['true_positive_rate'],
                       'positive_predictive_value': metrics['positive_predictive_value'],
                       'negative_predictive_value': metrics['negative_predictive_value'],
                       'threshold': metrics['threshold'], 'parameters_modified': num_modified}
        return performance
    except Exception:
        performance = {"model_id": model_p.id, "text_feature": text_feature,
                       "AUC": 0,
                       "LogLoss": 1,
                       "sample_pct": model_p.sample_pct,
                       'f1_score': 0,
                       'true_negative_rate': 0,
                       'false_positive_rate': 0,
                       'true_positive_rate': 0,
                       'positive_predictive_value': 0,
                       'negative_predictive_value': 0,
                       'threshold': 0, 'parameters_modified': num_modified}
return performance
# Get all the models for a given text field.
# This function has two uses: first, to find the best Auto-Tuned model for the
# text features, and then to compare the best model before and after the
# pre-processing. Keep only models trained on less than 100% of the dataset.
def models_performance_for_text(text_feature,project):
'''
extract all models built only for text features
:param text_feature: list of features of type text
:param project: DataRobot project
:return: all models trained on less than 100% and trained on only the text features (Auto-Tuned Word N-gram )
'''
models_desc =project.get_models(
search_params={
'name': text_feature
})
df= | pd.DataFrame() | pandas.DataFrame |
"""
Test model.py module.
"""
import numpy as np
import pandas as pd
import pytest
from src import model
@pytest.fixture
def dummy_df():
"""Example data to test modeling utility functions"""
dummy_df = pd.DataFrame(data={"col1": list(range(100)), "target": list(range(100))})
return dummy_df
def test_parse_ratio():
"""Parses ratio as expected."""
ratio = "6:2:2"
sizes = model._parse_ratio(ratio)
assert sizes == [0.6, 0.2, 0.2]
def test_parse_ratio_missing_component():
"""Ratio only provides two components."""
ratio = "6:4"
sizes = model._parse_ratio(ratio)
assert sizes == [0.6, 0., 0.4]
def test_parse_ratio_invalid_ratio():
"""Ratio uses incorrect separator."""
ratio = "6;2;2"
with pytest.raises(ValueError):
model._parse_ratio(ratio)
def test_split_predictors_response(dummy_df):
"""Separate predictor variables from response."""
nrows, ncols = dummy_df.shape
features, target = model.split_predictors_response(dummy_df, target_col="target")
assert features.shape == (nrows, ncols - 1)
assert target.shape == (nrows, )
def test_split_predictors_response_invalid_target_col(dummy_df):
"""Specified target column does not exist in DataFrame."""
with pytest.raises(KeyError):
model.split_predictors_response(dummy_df, "class_dne")
def test_split_predictors_response_only_one_column():
"""Splitting a 1-column DataFrame yields a 0-col DF and Series."""
df = pd.DataFrame(data={"col1": list(range(100))})
features, target = model.split_predictors_response(df, target_col="col1")
pd.testing.assert_frame_equal(features, pd.DataFrame(index=list(range(100))))
pd.testing.assert_series_equal(target, pd.Series(data=list(range(100)), name="col1"))
def test_split_train_val_test(dummy_df):
"""Splits into train/val/test with dimensions as expected."""
target = dummy_df["target"]
features = dummy_df.drop("target", axis=1)
splits = model.split_train_val_test(features, target, "6:2:2")
assert len(splits) == 6 # Features and response for each of train/val/test
X_train, X_val, X_test, y_train, y_val, y_test = splits
assert X_train.shape == (60, 1)
assert X_val.shape == (20, 1)
assert X_test.shape == (20, 1)
assert y_train.shape == (60, )
assert y_val.shape == (20, )
assert y_test.shape == (20, )
def test_split_train_val_test_no_validation(dummy_df):
"""Splits into train/test (no val) with dimensions as expected."""
target = dummy_df["target"]
features = dummy_df.drop("target", axis=1)
# Ratio specifies all three numbers
X_train, X_test, y_train, y_test = model.split_train_val_test(features, target, "3:0:1")
assert X_train.shape == (75, 1)
assert X_test.shape == (25, 1)
assert y_train.shape == (75,)
assert y_test.shape == (25,)
# Ratio provides only two numbers
X_train, X_test, y_train, y_test = model.split_train_val_test(features, target, "3:1")
assert X_train.shape == (75, 1)
assert X_test.shape == (25, 1)
assert y_train.shape == (75, )
assert y_test.shape == (25, )
def test_split_train_val_test_bad_ratio(dummy_df):
"""Ratio results in an empty train, validation, or test set."""
target = dummy_df["target"]
features = dummy_df.drop("target", axis=1)
with pytest.raises(ValueError):
model.split_train_val_test(features, target, "10:0:0")
with pytest.raises(ValueError):
model.split_train_val_test(features, target, "0:10:0")
with pytest.raises(ValueError):
model.split_train_val_test(features, target, "0:0:10")
def test_parse_dict_to_dataframe():
"""Convert a dictionary to `pandas.DataFrame` format."""
colnames = ["col1", "col2", "col3"]
sample_data = [1.5, 3.0, 4.5]
data_dict = dict(zip(colnames, sample_data))
actual_df = model.parse_dict_to_dataframe(data_dict)
expected_df = | pd.DataFrame(data=[sample_data], columns=colnames) | pandas.DataFrame |
'''
Created on April 15, 2012
Last update on July 18, 2015
@author: <NAME>
@author: <NAME>
@author: <NAME>
'''
import pandas as pd
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
# def get(df, col):
# return(df[col])
# df['Close'] => get(df, COL.CLOSE)
# price=COL.CLOSE
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
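# Usage sketch (assumption, not part of the original module): with SETTINGS.join
# left at True every indicator returns the input DataFrame with the new column(s)
# appended; flipping it to False returns only the indicator Series/DataFrame.
#
#     prices = pd.DataFrame({'Close': [1., 2., 3., 4., 5.]})
#     with_ma = MA(prices, 2)      # DataFrame with an extra 'MA_2' column
#     SETTINGS.join = False
#     ma_only = MA(prices, 2)      # just the 'MA_2' Series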
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
result = pd.Series(pd.rolling_mean(df[price], n), name=name)
return out(SETTINGS, df, result)
def EMA(df, n, price='Close'):
"""
Exponential Moving Average
"""
result=pd.Series(pd.ewma(df[price], span=n, min_periods=n - 1), name='EMA_' + str(n))
return out(SETTINGS, df, result)
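# Compatibility sketch (assumption: pandas >= 0.23, where pd.rolling_mean and
# pd.ewma no longer exist). The same two indicators written against the modern
# rolling/ewm API would look like this; the rest of the module still targets the
# 2015-era pandas API it was written for.
def MA_modern(df, n, price='Close'):
    """Moving Average via the current Series.rolling API."""
    result = pd.Series(df[price].rolling(window=n).mean(), name='MA_' + str(n))
    return out(SETTINGS, df, result)
def EMA_modern(df, n, price='Close'):
    """Exponential Moving Average via the current Series.ewm API."""
    result = pd.Series(df[price].ewm(span=n, min_periods=n - 1).mean(),
                       name='EMA_' + str(n))
    return out(SETTINGS, df, result)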
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
# for i, idx in enumerate(df.index)
# TR=max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
        TR = max(df['High'].iloc[i + 1], df['Close'].iloc[i]) - min(df['Low'].iloc[i + 1], df['Close'].iloc[i])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
result = pd.Series(pd.ewma(TR_s, span=n, min_periods=n), name='ATR_' + str(n))
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
MA = pd.Series(pd.rolling_mean(df[price], n))
MSD = pd.Series(pd.rolling_std(df[price], n))
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
result = pd.DataFrame([PP, R1, S1, R2, S2, R3, S3]).transpose()
return out(SETTINGS, df, result)
def STOK(df):
"""
Stochastic oscillator %K
"""
result = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return out(SETTINGS, df, result)
def STO(df, n):
"""
Stochastic oscillator %D
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
result = pd.Series(pd.ewma(SOk, span=n, min_periods=n - 1), name='SO%d_' + str(n))
return out(SETTINGS, df, result)
def SMA(df, timeperiod, key='Close'):
result = pd.Series(pd.rolling_mean(df[key], timeperiod, min_periods=timeperiod), name='SMA_' + str(timeperiod))
return out(SETTINGS, df, result)
def TRIX(df, n):
"""
Trix
"""
EX1 = pd.ewma(df['Close'], span=n, min_periods=n - 1)
EX2 = pd.ewma(EX1, span=n, min_periods=n - 1)
EX3 = pd.ewma(EX2, span=n, min_periods=n - 1)
i = 0
ROC_l = [0]
while i + 1 <= len(df) - 1: # df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
result = pd.Series(ROC_l, name='Trix_' + str(n))
return out(SETTINGS, df, result)
def ADX(df, n, n_ADX):
"""
Average Directional Movement Index
"""
i = 0
UpI = []
DoI = []
while i + 1 <= len(df) - 1: # df.index[-1]:
UpMove = df.get_value(i + 1, 'High') - df.get_value(i, 'High')
DoMove = df.get_value(i, 'Low') - df.get_value(i + 1, 'Low')
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(pd.ewma(TR_s, span=n, min_periods=n))
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1) / ATR,name='PosDI')
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1) / ATR,name='NegDI')
result = pd.Series(pd.ewma(abs(PosDI - NegDI) / (PosDI + NegDI), span=n_ADX, min_periods=n_ADX - 1), name='ADX_' + str(n) + '_' + str(n_ADX))
result = pd.concat([df,PosDI,NegDI,result], join='outer', axis=1,ignore_index=True)
result.columns=["High","Low","Close","PosDI","NegDI","ADX"]
return result
def MACD(df, n_fast, n_slow, price='Close'):
"""
MACD, MACD Signal and MACD difference
"""
EMAfast = pd.Series(pd.ewma(df[price], span=n_fast, min_periods=n_slow - 1))
EMAslow = pd.Series(pd.ewma(df[price], span=n_slow, min_periods=n_slow - 1))
MACD = pd.Series(EMAfast - EMAslow, name='MACD_%d_%d' % (n_fast, n_slow))
MACDsign = pd.Series(pd.ewma(MACD, span=9, min_periods=8), name='MACDsign_%d_%d' % (n_fast, n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_%d_%d' % (n_fast, n_slow))
result = pd.DataFrame([MACD, MACDsign, MACDdiff]).transpose()
return out(SETTINGS, df, result)
def MassI(df):
"""
Mass Index
"""
Range = df['High'] - df['Low']
EX1 = pd.ewma(Range, span=9, min_periods=8)
EX2 = pd.ewma(EX1, span=9, min_periods=8)
Mass = EX1 / EX2
result = pd.Series(pd.rolling_sum(Mass, 25), name='Mass Index')
return out(SETTINGS, df, result)
def Vortex(df, n):
"""
Vortex Indicator
"""
i = 0
TR = [0]
while i < len(df) - 1: # df.index[-1]:
Range = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < len(df) - 1: # df.index[-1]:
Range = abs(df.get_value(i + 1, 'High') - df.get_value(i, 'Low')) - abs(df.get_value(i + 1, 'Low') - df.get_value(i, 'High'))
VM.append(Range)
i = i + 1
result = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name='Vortex_' + str(n))
return out(SETTINGS, df, result)
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""
KST Oscillator
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
result = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))
return out(SETTINGS, df, result)
def RSI(df, n):
"""
Relative Strength Index
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= len(df) - 1: # df.index[-1]
UpMove = df.iloc[i + 1]['High'] - df.iloc[i]['High']
DoMove = df.iloc[i]['Low'] - df.iloc[i + 1]['Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1))
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1))
result = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
return out(SETTINGS, df, result)
def TSI(df, r, s):
"""
True Strength Index
"""
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(pd.ewma(M, span=r, min_periods=r - 1))
aEMA1 = pd.Series(pd.ewma(aM, span=r, min_periods=r - 1))
EMA2 = pd.Series( | pd.ewma(EMA1, span=s, min_periods=s - 1) | pandas.ewma |
import matplotlib.pyplot as plt
import pandas as pd
from oneibl.one import ONE
from ibllib.time import isostr2date
# import sys
# sys.path.extend('/home/owinter/PycharmProjects/WGs/BehaviourAnaysis/python')
from load_mouse_data import get_behavior
from behavior_plots import plot_psychometric
one = ONE()
# https://alyx.internationalbrainlab.org/admin/actions/session/e752b02d-b54d-4373-b51e-0b31be5f8ee5/change/
# first get the subject information
subject_details = one.alyx.rest('subjects', 'read', 'IBL_14')
# plot the weight curve
# https://alyx.internationalbrainlab.org/admin-actions/water-history/37c8f897-cbcc-4743-bad6-764ccbbfb190
wei = | pd.DataFrame(subject_details['weighings']) | pandas.DataFrame |
import pandas as pd
import requests
# The class name must match the file name, e.g. class demo lives in demo.py
class demo:
def __init__(self,
stock_price,
**kwargs, ):
# -------------------------------------------------------------------
        # Do not modify this block
        stock_price = stock_price.sort_values('date')
        # stock prices
        self.stock_price = stock_price
        # margin purchases and short sales
        self.MarginPurchaseShortSale = kwargs.get("MarginPurchaseShortSale", pd.DataFrame())
        # institutional investors' buying and selling
self.InstitutionalInvestorsBuySell = kwargs.get("InstitutionalInvestorsBuySell", | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import sys
import os
from scipy.signal import find_peaks
from scipy.signal import butter, lfilter, freqz
from get_peaks import load_dat_file
def get_mcell_observables_counts(dir):
counts = {}
seed_dirs = os.listdir(dir)
for seed_dir in seed_dirs:
if not seed_dir.startswith('seed_'):
continue
file_list = os.listdir(os.path.join(dir, seed_dir))
for file in file_list:
file_path = os.path.join(dir, seed_dir, file)
if os.path.isfile(file_path) and file.endswith('.dat'):
observable = os.path.splitext(file)[0]
if observable.endswith('_MDLString'):
observable = observable[:-len('_MDLString')]
if observable not in counts:
index = 0
else:
index = counts[observable].shape[1] - 1
col_name = 'count' + str(index)
df = pd.read_csv(file_path, sep=' ', names=['time', col_name])
if observable not in counts:
counts[observable] = df
else:
# add new column
counts[observable][col_name] = df[col_name]
return counts
def get_averages(dir):
dfA = pd.DataFrame()
dfR = pd.DataFrame()
counts = get_mcell_observables_counts(dir)
print(counts)
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.cpePaser import week_extract
from src.cpePaser import day_extract
from src.cpePaser import month_extract
from src.cpePaser import extract_data
import os
from src.compress import compress
import pandas as pd
from src.setting import setting
from src.timeOperator import timeOpt
import time
import multiprocessing
from src.logger_setting.my_logger import get_logger
logger = get_logger()
MONTH1 = '1'
MONTH2 = '2'
TRAIN = '0'
PREDICT = '1'
MAX_INVALID_VALUE = 9999
def get_data_by_range(first_day, last_day):
df = | pd.DataFrame(columns=setting.month_column_name) | pandas.DataFrame |
from typing import Tuple
import numpy as np
import pandas as pd
class DecisionStump:
def __init__(self, epsilon: float = 1e-6):
r"""A depth-1 decision tree classifier
Args:
epsilon: float
To classify all the points in the training set as +1,
the model will set the dividing line (threshold) to
threshold = min(x_best_feature) - epsilon
"""
self.epsilon = epsilon
self.best_feature = '' # label of the best feature column
self.threshold = 0.0 # dividing line
self.inverse = False
def train(self, X_train: pd.DataFrame, y_train: pd.Series,
weights: pd.Series = None, full_error: bool = False):
n_data = len(X_train)
# Compute errors for all possible dividing lines
errors = []
for feature in X_train.columns:
x_col = X_train[feature]
# Iterate over all data points
err = [self.weighted_error(y_train,
self._predict(x_col,
threshold=xi,
inverse=False),
weights)
for xi in x_col]
# Set the threshold below the minimum of current feature
threshold_min = min(x_col) - self.epsilon
y_pred = self._predict(x_col, threshold=threshold_min, inverse=False)
err.append(self.weighted_error(y_train, y_pred, weights))
# Store the errors
errors.append(pd.Series(err, name=f"{feature}"))
# Inverse the decision
# Iterate over all data points
err = [self.weighted_error(y_train,
self._predict(x_col,
threshold=xi,
inverse=True),
weights)
for xi in x_col]
# Set the threshold below the minimum of current feature
threshold_min = min(x_col) - self.epsilon
y_pred = self._predict(x_col, threshold=threshold_min, inverse=True)
err.append(self.weighted_error(y_train, y_pred, weights))
# Store the errors
errors.append( | pd.Series(err, name=f"{feature}-inverse") | pandas.Series |
#%%
# LOAD THE DATASETS
import pandas as pd
import numpy as np
from shapely.geometry import Point
import shapely as shp
import geopandas as gpd
from geopandas.array import points_from_xy
path = "merged1_listas.pkl"
df_merge1 = pd.read_pickle(path)
df_merge1.reset_index(inplace=True)
#%%
#region PART 1: FROM THE FIRST MERGE UP TO COMPLETING THE SPEAKERS
# Convert all the string-lists into real lists, in case any are left
def sep(x):
    """Convert possible string-encoded lists into real (Python) lists"""
if (isinstance(x, str)) and ("[" in x):
return x.replace("'", "").strip("][").split(", ")
else:
return x
def sep_float(x):
    """Convert possible string-encoded lists of floats into real lists of floats"""
if (isinstance(x, str)) and ("[" in x):
lista = x.replace("'", "").strip("][").split(", ")
return [float(x_n) for x_n in lista]
elif isinstance(x,list):
return [float(x_n) for x_n in x]
else:
print(x)
return float(x)
# to load the points into geopandas as coordinates
def topoint(x):
if isinstance(x, list):
return [shp.wkt.loads(point) for point in x]
else:
return shp.wkt.loads(x)
#%%
df1 = df_merge1.applymap(sep)
print('Now the problematic columns only contain real lists \n\n#Lists per column:')
print(df1.applymap(lambda x: isinstance(x,list)).sum())
print('\nAnd the columns with numeric values no longer contain any strings \n\n#Strings per column:')
print(df1.applymap(lambda x: isinstance(x,str)).sum())
#%%
# MACROAREA: fill in by hand the 14 NaNs we found
macronan = df1[df1["Macroarea"].isnull()]
# Add the missing macroareas in a copy of the original dataframe
df2 = df1.copy()
macroareas_faltantes = [
"Papunesia",
"South America",
"Africa",
"Eurasia",
"Eurasia",
"Eurasia",
"Australia",
"Australia",
"North America",
"North America",
"Australia",
"Eurasia",
"Africa",
"Australia",
]
df2.loc[macronan.index, "Macroarea"] = macroareas_faltantes
#%%
# SIGN LANGUAGES: drop the corresponding rows
# rename the column because the spaces and the "ñ" cause confusion
df3 = df2.rename(columns={"Lenguaje de Señas": "senas"})
indexNames = df3[df3["senas"] == 1].index
df3.drop(indexNames, inplace=True)
df3 = df3.reset_index(drop=True)
#%%
# NUM_SPEAKER: after checking some entries against Ethnologue, we concluded
# that the largest value appearing in the list is the total number of
# speakers. Keep only that element of each list.
# entries that are lists
list_bool = df3["num_speakers"].apply(
lambda x: isinstance(x, list)
)
# list of those lists
num_speaker_lists = df3["num_speakers"][list_bool]
# take the maximum with the lambda function and replace those values in the df
df3.loc[num_speaker_lists.index, "num_speakers"] = num_speaker_lists.apply(
lambda x: max(x)
)
#%% WE START BUILDING THE FEATURES
# New feature with the number of countries
# Number of elements of the list (1 if it is not a list)
def num_countries(x):
if isinstance(x,list):
return len(x)
else:
return 1
df3['cant_paises'] = df3['countryISO'].apply(lambda x: num_countries(x))
#%%
# Build a new feature that describes how close languages are to each other
# The feature will be how many languages each one has within a certain
# distance
# transform the string points into objects of shapely's Point class
df4 = df3.copy()
df4['Country_coord'] = df4['Country_coord'].apply(topoint)
col_latitude = []
col_longitude = []
for i, item in df4['Country_coord'].items():
if isinstance(item,Point):
col_latitude.append(item.x)
col_longitude.append(item.y)
elif isinstance(item,list):
sublist_lat = [subitem.x for subitem in item]
sublist_long = [subitem.y for subitem in item]
col_latitude.append(sublist_lat)
col_longitude.append(sublist_long)
df4['Country_lat'] = col_latitude
df4['Country_long'] = col_longitude
#%%
# this part is commented out because it takes a while to run, just in case
# largo = len(df4)
# col_nearest = np.zeros(largo)
# print("Running Euge's loop to build the nearest languages feature")
# for i in range(largo):
# print(i)
# if df4["cant_paises"][i] == 1:
# lat = df4["Latitude"][i]
# long = df4["Longitude"][i]
# cercanos = 0
# for j in range(largo):
# if j != i:
# if df4["cant_paises"][j] == 1:
# lat1 = df4["Latitude"][j]
# long1 = df4["Longitude"][j]
# dif_lat = lat1 - lat
# dif_long = long1 - long
# if (abs(dif_lat) < 10) and (abs(dif_long) < 10):
# cercanos = cercanos + 1
# else:
# for k in range(df4["cant_paises"][j]):
# lat1 = df4["Country_lat"][j][k]
# long1 = df4["Country_long"][j][k]
# dif_lat = lat1 - lat
# dif_long1 = long1 - long
# if (abs(dif_lat) < 10) and (abs(dif_long) < 10):
# cercanos = cercanos + 1
# else:
# cercanos = 0
# for m in range(df4["cant_paises"][i]):
# lat = df4["Country_lat"][i][m]
# long = df4["Country_long"][i][m]
# for j in range(largo):
# if j != i:
# if df4["cant_paises"][j] == 1:
# lat1 = df4["Latitude"][j]
# long1 = df4["Longitude"][j]
# dif_lat = lat1 - lat
# dif_long = long1 - long
# if (abs(dif_lat) < 10) and (abs(dif_long) < 10):
# cercanos = cercanos + 1
# else:
# for k in range(df4["cant_paises"][j]):
# lat1 = df4["Country_lat"][j][k]
# long1 = df4["Country_long"][j][k]
# dif_lat = lat1 - lat
# dif_long1 = long1 - long
# if (abs(dif_lat) < 10) and (abs(dif_long) < 10):
# cercanos = cercanos + 1
# col_nearest[i] = cercanos
#endregion
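# Vectorised sketch of the neighbour count computed by the commented-out loop
# above (assumption: two languages are "near" whenever any pair of their country
# coordinates differs by less than 10 degrees in both latitude and longitude).
# Defined as a function and not called here, mirroring the commented-out code.
def nearest_languages_vectorised(df):
    lang_idx, lats, longs = [], [], []
    for i, row in df.iterrows():
        if isinstance(row['Country_lat'], list):
            row_lats, row_longs = row['Country_lat'], row['Country_long']
        else:
            row_lats, row_longs = [row['Latitude']], [row['Longitude']]
        for la, lo in zip(row_lats, row_longs):
            lang_idx.append(i)
            lats.append(la)
            longs.append(lo)
    lang_idx = np.array(lang_idx)
    lats = np.array(lats, dtype=float)
    longs = np.array(longs, dtype=float)
    near = ((np.abs(lats[:, None] - lats[None, :]) < 10)
            & (np.abs(longs[:, None] - longs[None, :]) < 10)
            & (lang_idx[:, None] != lang_idx[None, :]))
    per_point = pd.Series(near.sum(axis=1), index=lang_idx)
    return per_point.groupby(level=0).sum().reindex(df.index, fill_value=0)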
#%%
# df4.to_pickle('paso1.pkl')
# df4.to_csv('paso1.csv')
#%%
#region PART 2: ADD THE SPEAKERS FROM EACH ONE INTO THE MAIN DF
#df4 = pd.read_pickle('paso1.pkl')
#df_principal = df4.copy()
ingrid = pd.read_csv('/home/ingrid/Documents/labodatos/TP_final/df_principal/completados_INGRID.csv')
euge = pd.read_excel('/home/ingrid/Documents/labodatos/TP_final/df_principal/df_num_speakers_euge_arreglado_v2.xlsx')
mai = pd.read_csv('/home/ingrid/Documents/labodatos/TP_final/df_principal/completados_MAIA.csv')
romi = | pd.read_csv('/home/ingrid/Documents/labodatos/TP_final/df_principal/romi_completos.csv') | pandas.read_csv |
import psycopg2
import pandas as pd
import db.db_access as access
CONST_SQL_GET_TWITTER_DET = 'SELECT id, main_company_id, twitter_keyword, twitter_cashtag, twitter_url, is_parent_company FROM company'
CONST_SQL_GET_MAIN_COMPANY = 'SELECT * FROM maincompany'
CONST_SQL_GET_COMPANY_DETAIL = 'SELECT * FROM company_detail'
CONST_SQL_GET_MAT_VIEW_KEYWORD = 'SELECT * FROM public.day_keyword_materialised'
CONST_SQL_GET_MAT_VIEW_CASHTAG = 'SELECT * FROM public.day_cashtag_materialised'
CONST_SQL_LAST_DATE_TWEET = """SELECT MAX(date_utc) FROM {TABLE} WHERE company_id = '{COMPANY_ID}';"""
CONST_SQL_GET_MAT_VIEW_FOR_COMPANY = """SELECT """
# for daily twitter webservice
CONST_SQL_GET_TWEETS_BY_CASHTAG = """SELECT * FROM "public"."parsing_twitter_cashtag" t WHERE t.search = {CASHTAG} AND TO_CHAR(t.date, 'yyyy-mm-dd') = {DATE}"""
class TwitterConnectorDbSignal(object):
def __init__(self):
self.host, self.port, self.database, self.user, self.password = access.postgre_access_google_cloud()
def get_psql_context(self):
cnx = psycopg2.connect(host=self.host, port=self.port, database=self.database, user=self.user,
password=self.password)
return cnx
def get_twitter_detail(self):
cnx = self.get_psql_context()
cur = cnx.cursor()
try:
CONST_SQL_GET_TWITTER_DET = 'SELECT * FROM twitter_detail'
cur.execute(CONST_SQL_GET_TWITTER_DET)
result = cur.fetchall()
result = pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description])
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
cur.close()
def get_last_date_tweet(self, company_id, table_name):
cnx = self.get_psql_context()
cur = cnx.cursor()
query = CONST_SQL_LAST_DATE_TWEET.format(TABLE=str(table_name), COMPANY_ID=str(company_id))
cur.execute(query)
result = cur.fetchall()
return result[0][0]
def get_main_company(self):
cnx = self.get_psql_context()
cur = cnx.cursor()
try:
cur.execute(CONST_SQL_GET_MAIN_COMPANY)
result = cur.fetchall()
result = pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description])
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
cur.close()
def get_company_detail(self):
cnx = self.get_psql_context()
cur = cnx.cursor()
try:
cur.execute(CONST_SQL_GET_COMPANY_DETAIL)
result = cur.fetchall()
result = pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description])
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
cur.close()
def get_link_tweet_from_period_range(self, company_id, date_utc, table_name):
cnx = self.get_psql_context()
cur = cnx.cursor()
try:
CONST_SQL_GET_LINK_LIKES_PERIOD_RANGE = """SELECT tweet_id, link, tweet, nlikes, sentiment_score_vader, cashtags
FROM {TABLE} WHERE is_spam is False AND company_id = '{COMPANY_ID}' AND date_utc BETWEEN '{DATE_UTC}' AND '{DATE_UTC_2} 23:59:59.997';"""
query = CONST_SQL_GET_LINK_LIKES_PERIOD_RANGE.format(TABLE=str(table_name), COMPANY_ID=str(company_id), DATE_UTC=str(date_utc), DATE_UTC_2=str(date_utc))
cur.execute(query)
result = cur.fetchall()
result = pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description])
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
cur.close()
def get_more_tweet(self, company_id, table_name):
cnx = self.get_psql_context()
cur = cnx.cursor()
try:
CONST_SQL_GET_LINK_LIKES_PERIOD_RANGE = """SELECT tweet_id, link, tweet, nlikes, sentiment_score_vader, cashtags
FROM {TABLE} WHERE is_spam is False AND company_id = '{COMPANY_ID}' AND nlikes >=1 ORDER BY date_utc DESC LIMIT 40"""
query = CONST_SQL_GET_LINK_LIKES_PERIOD_RANGE.format(TABLE=str(table_name), COMPANY_ID=str(company_id))
cur.execute(query)
result = cur.fetchall()
result = pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description])
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
cur.close()
def get_materialised_view_keyword(self):
cnx = self.get_psql_context()
cur = cnx.cursor()
try:
cur.execute(CONST_SQL_GET_MAT_VIEW_KEYWORD)
result = cur.fetchall()
result = pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description])
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
cur.close()
def get_materialised_view_cashtag(self):
cnx = self.get_psql_context()
cur = cnx.cursor()
try:
cur.execute(CONST_SQL_GET_MAT_VIEW_CASHTAG)
result = cur.fetchall()
result = pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description])
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
cur.close()
def get_materialised_view_for_company_id(self):
cnx = self.get_psql_context()
cur = cnx.cursor()
try:
cur.execute(CONST_SQL_GET_MAT_VIEW_FOR_COMPANY)
result = cur.fetchall()
result = | pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description]) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categoires (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor useage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
        # the unicode option should not affect Categorical, as it does not
        # care about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
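        # from_array on a PeriodIndex should factorize the periods into codes
        # and keep the sorted unique periods as the categories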
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
    def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
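        # as_ordered/as_unordered/set_ordered should toggle the ordered flag,
        # either in place or by returning a new Categorical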
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
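        # set_categories can reorder, drop (values become NaN) or add
        # categories; the underlying values are kept wherever possible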
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
        # not all "old" categories are included in "new" -> the excluded ones
        # become np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
        # all "pointers" to '4' must be changed from 3 to 0, ...
        c = c.set_categories([4, 3, 2, 1])
        # positions are changed
        self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
        # categories are now in new order
        self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
        # output is the same
        self.assert_numpy_array_equal(c.get_values(),
                                      np.array([1, 2, 3, 4, 1]))
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
        # adding a category that is already present raises
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
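        # removing a category turns matching values into NaN; works both
        # in place and by returning a new Categorical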
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
        # removing a category that is not present raises
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
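        # only categories that actually appear in the values should survive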
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
        # nan must be removed from the categories, but is kept in the values
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
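        # the mode is returned as a Categorical with the original (ordered)
        # categories and must never include NaN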
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
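        # scalar indexing returns the value itself, slicing returns a
        # Categorical with the same categories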
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
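        # nbytes is the size of the codes plus the size of the categories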
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
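        # for numeric categories memory_usage equals nbytes; object
        # categories only differ when deep=True (string payload is counted)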
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
        # Scalar version of single item array
        # Categorical returns an np.array like pd.Series does, but different
        # from np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        # eggs after donuts, after switching milk and donuts
        exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
        # TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
        # TODO: levels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
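        # the codes dtype should grow (int8 -> int16 -> int32) with the number
        # of categories and shrink again when categories are removed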
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
        # doing this breaks transform
        x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
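        # Series.astype('category') should be equivalent to wrapping the
        # values in a Categorical directly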
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
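        # unstacking a category column should keep the category dtype in each
        # resulting column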
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
        # Nans are represented as -1 in the codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
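        # the .cat accessor exposes the categories and the category-mutating
        # methods of a categorical Series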
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
        for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
        s = Series(Categorical(["a", "b", "b", "a"],
                               categories=["a", "b", "c"]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
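        # assigning the result of pd.cut (a Categorical) to a DataFrame column
        # should not emit any warnings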
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
        # test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with | tm.assert_produces_warning(FutureWarning) | pandas.util.testing.assert_produces_warning |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Laps.py
# Interact with the RaceMonitor lap timing system
# TODO:
# When in live race mode, timestamps are tagged with an offset different from the one used in the historical view.
# If this time offset can be adjusted, it would be preferable to store the data in live-view format over the weekend.
from __future__ import print_function, unicode_literals
from PyInquirer import prompt, print_json
from pprint import pprint
from operator import itemgetter
from influxdb import InfluxDBClient
import os
import sys
import subprocess
import requests
import json
import pickle
import time
import signal
import csv
import pandas
import logging
import argparse
new_competitor = True
underline = "-" * 80
race_id = ''
racer_id = ''
car_number = ''
selected_class = ''
upper_class = ''
race_live = True
parser = argparse.ArgumentParser(description='Interact with lap data')
parser.add_argument('race_id', metavar='race_id', nargs=1, type=int, action='store')
parser.add_argument('car_number', metavar='car_number', nargs=1, type=int, action='store')
parser.add_argument('-c', '--class', metavar='A/B/C', dest='selected_class', nargs='?', type=ascii, action='store', help='Group or filter by class (A/B/C)')
parser.add_argument('-m', '--monitor', dest='monitor_mode', default=False, action='store_true', help='Update when new data received')
parser.add_argument('-n', '--network', dest='network_mode', default=False, action='store_true', help='Forward lap data via influx')
parser.add_argument('-o', '--out', dest='save_file', default=False, action='store_true', help='Write lap times to CSV')
parser.add_argument('-v', '--verbose', help="Set debug logging", action='store_true')
parser.set_defaults(monitor_mode=False, network_mode=False)
args = parser.parse_args()
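# Example invocations (hypothetical race and car IDs -- real values come from RaceMonitor):
#   ./Laps.py 12345 42                   # one-shot lap report for car 42 in race 12345
#   ./Laps.py 12345 42 -c A -m -n -o     # filter to class A, keep monitoring, forward via influx, write CSV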
def main():
if args.verbose:
print(args)
# Set logging level - https://docs.python.org/3/howto/logging.html#logging-basic-tutorial
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
else:
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    # Pandas' default max-rows setting truncates lap times; I don't expect a team to do more than 1024 laps.
| pandas.set_option("display.max_rows", 1024) | pandas.set_option |
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
# --------------------------------------------------------------------------
# Data
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
| pd.Timestamp('2019-08-05 00:00:00') | pandas.Timestamp |
import numpy as np
import pandas as pd
#import scipy.stats as stats
import matplotlib.pyplot as plt
# factors (constants) for subgroups of 4 observations
factorA2 = 0.729
factorD4 = 2.282
factorD3 = 0
# 30 subgroups of 4 observations each
arrayDatos = np.array([[1010,991,985,986],
[995,996,1009,1001],
[990,1003,994,997],
[1015,1020,998,981],
[1013,1019,997,999],
[994,1001,1009,1011],
[989,992,1021,1000],
[1001,986,1005,987],
[1006,989,999,982],
[992,1007,996,979],
[996,1006,1009,989],
[1019,996,978,999],
[981,991,1000,1017],
[1015,993,980,1009],
[1023,1008,1000,998],
[993,1011,987,998],
[998,998,996,997],
[999,995,1009,1000],
[1020,1023,998,978],
[1018,1016,990,983],
[989,998,1010,999],
[992,997,1023,1003],
[988,1015,992,987],
[1009,1011,1009,983],
[991,996,980,972],
[1000,989,990,994],
[990,987,999,1008],
[993,1001,1018,1020],
[991,1017,988,999],
[1020,999,987,1011]])
datos = | pd.DataFrame(arrayDatos) | pandas.DataFrame |
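# A minimal sketch (not part of the original script) of how the A2/D4/D3 constants above are
# typically used to compute X-bar and R control limits for subgroups of 4 observations.
# It assumes `datos` is the DataFrame built from arrayDatos; the variable names are illustrative.
xbar = datos.mean(axis=1)                        # subgroup means
rangos = datos.max(axis=1) - datos.min(axis=1)   # subgroup ranges
xbarbar = xbar.mean()                            # centre line of the X-bar chart
rbar = rangos.mean()                             # centre line of the R chart
ucl_x = xbarbar + factorA2 * rbar                # X-bar chart limits
lcl_x = xbarbar - factorA2 * rbar
ucl_r = factorD4 * rbar                          # R chart limits
lcl_r = factorD3 * rbar
print(f"X-bar chart: CL={xbarbar:.2f}, UCL={ucl_x:.2f}, LCL={lcl_x:.2f}")
print(f"R chart:     CL={rbar:.2f}, UCL={ucl_r:.2f}, LCL={lcl_r:.2f}")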
# Zip lists: zipped_lists
zipped_lists = zip(feature_names, row_vals)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Print the dictionary
print(rs_dict)
# Define lists2dict()
def lists2dict(list1, list2):
"""Return a dictionary where list1 provides
the keys and list2 provides the values."""
# Zip lists: zipped_lists
zipped_lists = zip(list1, list2)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Return the dictionary
return rs_dict
# Call lists2dict: rs_fxn
rs_fxn = lists2dict(feature_names, row_vals)
# Print rs_fxn
print(rs_fxn)
# Print the first two lists in row_lists
print(row_lists[0])
print(row_lists[1])
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Print the first two dictionaries in list_of_dicts
print(list_of_dicts[0])
print(list_of_dicts[1])
# Import the pandas package
import pandas as pd
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Turn list of dicts into a DataFrame: df
df = pd.DataFrame(list_of_dicts)
# Print the head of the DataFrame
print(df.head())
# Open a connection to the file
with open("world_dev_ind.csv") as file:
# Skip the column names
file.readline()
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Process only the first 1000 rows
for j in range(1000):
# Split the current line into a list: line
line = file.readline().split(',')
# Get the value for the first column: first_col
first_col = line[0]
# If the column value is in the dict, increment its value
if first_col in counts_dict.keys():
counts_dict[first_col] += 1
# Else, add to the dict and set value to 1
else:
counts_dict[first_col] = 1
# Print the resulting dictionary
print(counts_dict)
# Define read_large_file()
def read_large_file(file_object):
"""A generator function to read a large file lazily."""
# Loop indefinitely until the end of the file
while True:
# Read a line from the file: data
data = file_object.readline()
# Break if this is the end of the file
if not data:
break
# Yield the line of data
yield data
# Open a connection to the file
with open('world_dev_ind.csv') as file:
# Create a generator object for the file: gen_file
gen_file = read_large_file(file)
# Print the first three lines of the file
print(next(gen_file))
print(next(gen_file))
print(next(gen_file))
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Open a connection to the file
with open("world_dev_ind.csv") as file:
# Iterate over the generator from read_large_file()
for line in read_large_file(file):
row = line.split(',')
first_col = row[0]
if first_col in counts_dict.keys():
counts_dict[first_col] += 1
else:
counts_dict[first_col] = 1
# Print
print(counts_dict)
# Import the pandas package
import pandas as pd
# Initialize reader object: df_reader
df_reader = pd.read_csv("ind_pop.csv", chunksize=10)
# Print two chunks
print(next(df_reader))
print(next(df_reader))
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv("ind_pop_data.csv", chunksize=1000)
# Get the first DataFrame chunk: df_urb_pop
df_urb_pop = next(urb_pop_reader)
# Check out the head of the DataFrame
print(df_urb_pop.head())
# Check out specific country: df_pop_ceb
df_pop_ceb = df_urb_pop[df_urb_pop["CountryCode"] == "CEB"]
# Zip DataFrame columns of interest: pops
pops = zip(df_pop_ceb["Total Population"], df_pop_ceb["Urban population (% of total)"])
# Turn zip object into list: pops_list
pops_list = list(pops)
# Print pops_list
print(pops_list)
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv("ind_pop_data.csv", chunksize=1000)
# Get the first DataFrame chunk: df_urb_pop
df_urb_pop = next(urb_pop_reader)
# Check out specific country: df_pop_ceb
df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB']
# Zip DataFrame columns of interest: pops
pops = zip(df_pop_ceb['Total Population'],
df_pop_ceb['Urban population (% of total)'])
# Turn zip object into list: pops_list
pops_list = list(pops)
# Use list comprehension to create new DataFrame column 'Total Urban Population'
df_pop_ceb['Total Urban Population'] = [int(tup[0]*tup[1]/100) for tup in pops_list]
# Plot urban population data
df_pop_ceb.plot(kind="scatter", x="Year", y="Total Urban Population")
plt.show()
# Initialize reader object: urb_pop_reader
urb_pop_reader = | pd.read_csv('ind_pop_data.csv', chunksize=1000) | pandas.read_csv |
#!/usr/bin/env python
import argparse
import glob
import os
from abc import abstractmethod, ABC
from collections import defaultdict
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import RepeatedKFold
from qpputils import dataparser as dp
# TODO: change the functions to work with pandas methods such as idxmax
# TODO: Consider changing the folds file format to be more convenient for pandas DataFrames
parser = argparse.ArgumentParser(description='Cross Validation script',
usage='Use CV to optimize correlation',
epilog='Prints the average correlation')
parser.add_argument('-p', '--predictions', metavar='predictions_dir', default='predictions',
help='path to prediction results files directory')
parser.add_argument('--labeled', default='baseline/QLmap1000', help='path to the labeled results (AP) file')
parser.add_argument('-r', '--repeats', default=30, help='number of repeats')
parser.add_argument('-k', '--splits', default=2, help='number of k-fold')
parser.add_argument('-m', '--measure', default='pearson', type=str,
help='default correlation measure type is pearson', choices=['pearson', 'spearman', 'kendall'], )
parser.add_argument("-g", "--generate", help="generate new CrossValidation sets", action="store_true")
parser.add_argument('-f', "--folds_file", metavar='CV_FILE_PATH', help="load an existing CrossValidation folds JSON file",
default='2_folds_30_repetitions.json')
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
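# Example invocation (hypothetical script name and paths; the flags mirror the argparse definitions above):
#   python cross_validation.py -p predictions --labeled baseline/QLmap1000 \
#       -k 2 -r 30 -m pearson -f 2_folds_30_repetitions.json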
class CrossValidation:
def __init__(self, folds_map_file=None, k=2, rep=30, predictions_dir=None, test='pearson', ap_file=None,
generate_folds=False, **kwargs):
logging.debug("testing logger")
self.k = k
self.rep = rep
self.test = test
assert predictions_dir, 'Specify predictions dir'
assert folds_map_file, 'Specify path for CV folds file'
predictions_dir = os.path.abspath(os.path.normpath(os.path.expanduser(predictions_dir)))
assert os.listdir(predictions_dir), f'{predictions_dir} is empty'
self.output_dir = dp.ensure_dir(predictions_dir.replace('predictions', 'evaluation'))
if ap_file:
self.full_set = self._build_full_set(predictions_dir, ap_file)
if '-' in ap_file:
self.ap_func = ap_file.split('-')[-1]
else:
self.ap_func = 'basic'
else:
self.full_set = self._build_full_set(predictions_dir)
if generate_folds:
self.index = self.full_set.index
self.folds_file = self._generate_k_folds()
self.__load_k_folds()
else:
try:
self.folds_file = dp.ensure_file(folds_map_file)
except FileExistsError:
print("The folds file specified doesn't exist, going to generate the file and save")
self.__load_k_folds()
# self.corr_df = NotImplemented
@abstractmethod
def calc_function(self, df: pd.DataFrame):
raise NotImplementedError
@staticmethod
def _build_full_set(predictions_dir, ap_file=None):
"""Assuming the predictions files are named : predictions-[*]"""
all_files = glob.glob(predictions_dir + "/*predictions*")
if 'uef' in predictions_dir:
# Excluding all the 5 and 10 docs predictions
if 'qf' in predictions_dir:
all_files = [fn for fn in all_files if
not os.path.basename(fn).endswith('-5+', 11, 14) and not os.path.basename(fn).endswith(
'-10+', 11, 15)]
else:
all_files = [fn for fn in all_files if
not os.path.basename(fn).endswith('-5') and not os.path.basename(fn).endswith('-10')]
list_ = []
for file_ in all_files:
fname = file_.split('-')[-1]
df = dp.ResultsReader(file_, 'predictions').data_df
df = df.rename(columns={"score": f'score_{fname}'})
list_.append(df)
if ap_file:
ap_df = dp.ResultsReader(ap_file, 'ap').data_df
list_.append(ap_df)
full_set = pd.concat(list_, axis=1, sort=True)
assert not full_set.empty, f'The Full set DF is empty, make sure that {predictions_dir} is not empty'
return full_set
def _generate_k_folds(self):
# FIXME: Need to fix it to generate a DF with folds, without redundancy
""" Generates a k-folds json res
:rtype: str (returns the saved JSON filename)
"""
rkf = RepeatedKFold(n_splits=self.k, n_repeats=self.rep)
count = 1
# {'set_id': {'train': [], 'test': []}}
results = defaultdict(dict)
for train, test in rkf.split(self.index):
train_index, test_index = self.index[train], self.index[test]
if count % 1 == 0:
results[int(count)]['a'] = {'train': train_index, 'test': test_index}
else:
results[int(count)]['b'] = {'train': train_index, 'test': test_index}
count += 0.5
temp = pd.DataFrame(results)
temp.to_json(f'{self.k}_folds_{self.rep}_repetitions.json')
return f'{self.k}_folds_{self.rep}_repetitions.json'
def __load_k_folds(self):
# self.data_sets_map = pd.read_json(self.file_name).T['a'].apply(pd.Series).rename(
# mapper={'train': 'fold-1', 'test': 'fold-2'}, axis='columns')
self.data_sets_map = | pd.read_json(self.folds_file) | pandas.read_json |
# -*- coding: utf-8 -*-
import pandas as pd
import pymysql
import pymysql.cursors
from functools import reduce
import numpy as np
import pandas as pd
import uuid
import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import pairwise_distances
import json
import common.common as common
import common.config as cfg
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
import common.schedule_util as sched_util
# 处理mqlog中记录的新的电影评分, 通过sql读取mqlog中类型为c的电影评分动作, 查询出相关的用户\电影\评分信息, 然后追加到训练集的csv上(训练集的csv会变大)
def process_comment_by_log(process_func):
connection = common.get_connection()
sql = 'select * from mqlog where logtype = \'c\' and pulled = 0 limit 0, 1000'
try:
comment_id_list = []
movie_id_list = []
user_id_list = []
message_id_list = []
with connection.cursor() as cursor:
cursor.execute(sql)
while True:
r = cursor.fetchone()
if r is None:
break
message = json.loads(r[2])
comment_id = message['id']
comment_id_list.append(comment_id)
movie_id = message['movieid']
movie_id_list.append(movie_id)
user_id = message['userid']
user_id_list.append(user_id)
message_id_list.append(r[0])
print('process comment\'s id collection is:' + str(comment_id_list))
if len(comment_id_list) == 0 or len(movie_id_list) == 0 or len(user_id_list) == 0:
print('No new available comment')
return
process_func(comment_id_list, movie_id_list, user_id_list, connection)
        # Mark pulled=1 in the mqlog table, i.e. this message has been processed and will not be handled again
with connection.cursor() as cursor4update:
for id_ in message_id_list:
update_sql = 'update mqlog set pulled=1 where id=\'%s\'' % id_
cursor4update.execute(update_sql)
connection.commit()
except Exception as e:
print(e)
connection.close()
connection.close()
# Concrete handler for new movie ratings; passed in and used as a callback
def process_new_comment_collection(comment_list, movie_list, user_list, conn):
exp_comment = '(\'' + reduce(lambda x, y: x+'\',\''+y, comment_list) + '\')'
c_sql = 'select * from comment_new where id in ' + exp_comment
print(c_sql)
exp_movie = '(\'' + reduce(lambda x, y: x+'\',\''+y, movie_list) + '\')'
m_sql = 'select * from movie where id in ' + exp_movie
print(m_sql)
exp_user = '(\'' + reduce(lambda x, y: x+'\',\''+y, user_list) + '\')'
u_sql = "select * from userproex_new where userid in " + exp_user
print(u_sql)
df_incremental = pd.read_sql_query(c_sql, conn)
df_movie = | pd.read_sql_query(m_sql, conn) | pandas.read_sql_query |
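# A plausible call site for the callback wiring above (an assumption -- the remainder of the
# script is not shown here): the log processor is handed the concrete handler as process_func.
#   process_comment_by_log(process_new_comment_collection)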
"""
Tests for the pandas.io.common functionalities
"""
import mmap
import os
import re
import pytest
from pandas.compat import FileNotFoundError, StringIO, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
import pandas.util.testing as tm
import pandas.io.common as icom
class CustomFSPath(object):
"""For testing fspath on unknown objects"""
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
# Functions that consume a string path and return a string or path-like object
path_types = [str, CustomFSPath]
try:
from pathlib import Path
path_types.append(Path)
except ImportError:
pass
try:
from py.path import local as LocalPath
path_types.append(LocalPath)
except ImportError:
pass
HERE = os.path.abspath(os.path.dirname(__file__))
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestCommonIOCapabilities(object):
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = '~/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name != filename
assert os.path.isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = '/somefolder/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
@td.skip_if_no('pathlib')
def test_stringify_path_pathlib(self):
rel_path = icom._stringify_path(Path('.'))
assert rel_path == '.'
redundant_path = icom._stringify_path(Path('foo//bar'))
assert redundant_path == os.path.join('foo', 'bar')
@td.skip_if_no('py.path')
def test_stringify_path_localpath(self):
path = os.path.join('foo', 'bar')
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
assert icom._stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath('foo/bar.csv')
result = icom._stringify_path(p)
assert result == 'foo/bar.csv'
@pytest.mark.parametrize('extension,expected', [
('', None),
('.gz', 'gzip'),
('.bz2', 'bz2'),
('.zip', 'zip'),
('.xz', 'xz'),
])
@pytest.mark.parametrize('path_type', path_types)
def test_infer_compression_from_path(self, extension, expected, path_type):
path = path_type('foo/bar.csv' + extension)
compression = icom._infer_compression(path, compression='infer')
assert compression == expected
def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(
filename)
assert filepath_or_buffer != filename
assert os.path.isabs(filepath_or_buffer)
assert os.path.expanduser(filename) == filepath_or_buffer
assert not should_close
def test_get_filepath_or_buffer_with_buffer(self):
input_buffer = StringIO()
filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(
input_buffer)
assert filepath_or_buffer == input_buffer
assert not should_close
def test_iterator(self):
reader = pd.read_csv(StringIO(self.data1), chunksize=1)
result = pd.concat(reader, ignore_index=True)
expected = pd.read_csv(StringIO(self.data1))
tm.assert_frame_equal(result, expected)
# GH12153
it = pd.read_csv(StringIO(self.data1), chunksize=1)
first = next(it)
tm.assert_frame_equal(first, expected.iloc[[0]])
tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])
@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
(pd.read_json, 'os', ValueError, 'json'),
(pd.read_msgpack, 'os', ValueError, 'mp'),
(pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
])
def test_read_non_existant(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, 'data', 'does_not_exist.' + fn_ext)
with pytest.raises(error_class):
reader(path)
@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
(pd.read_json, 'os', ValueError, 'json'),
(pd.read_msgpack, 'os', ValueError, 'mp'),
(pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
])
def test_read_expands_user_home_dir(self, reader, module,
error_class, fn_ext, monkeypatch):
pytest.importorskip(module)
path = os.path.join('~', 'does_not_exist.' + fn_ext)
monkeypatch.setattr(icom, '_expand_user',
lambda x: os.path.join('foo', x))
message = "".join(["foo", os.path.sep, "does_not_exist.", fn_ext])
with pytest.raises(error_class, message=re.escape(message)):
reader(path)
def test_read_non_existant_read_table(self):
path = os.path.join(HERE, 'data', 'does_not_exist.' + 'csv')
with pytest.raises(FileNotFoundError):
with tm.assert_produces_warning(FutureWarning):
pd.read_table(path)
@pytest.mark.parametrize('reader, module, path', [
(pd.read_csv, 'os', ('io', 'data', 'iris.csv')),
(pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')),
(pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')),
(pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')),
(pd.read_hdf, 'tables', ('io', 'data', 'legacy_hdf',
'datetimetz_object.h5')),
(pd.read_stata, 'os', ('io', 'data', 'stata10_115.dta')),
(pd.read_sas, 'os', ('io', 'sas', 'data', 'test1.sas7bdat')),
(pd.read_json, 'os', ('io', 'json', 'data', 'tsframe_v012.json')),
(pd.read_msgpack, 'os', ('io', 'msgpack', 'data', 'frame.mp')),
(pd.read_pickle, 'os', ('io', 'data', 'categorical_0_14_1.pickle')),
])
def test_read_fspath_all(self, reader, module, path, datapath):
pytest.importorskip(module)
path = datapath(*path)
mypath = CustomFSPath(path)
result = reader(mypath)
expected = reader(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def test_read_fspath_all_read_table(self, datapath):
path = datapath('io', 'data', 'iris.csv')
mypath = CustomFSPath(path)
with tm.assert_produces_warning(FutureWarning):
result = pd.read_table(mypath)
with tm.assert_produces_warning(FutureWarning):
expected = pd.read_table(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('writer_name, writer_kwargs, module', [
('to_csv', {}, 'os'),
('to_excel', {'engine': 'xlwt'}, 'xlwt'),
('to_feather', {}, 'feather'),
('to_html', {}, 'os'),
('to_json', {}, 'os'),
('to_latex', {}, 'os'),
('to_msgpack', {}, 'os'),
('to_pickle', {}, 'os'),
('to_stata', {}, 'os'),
])
def test_write_fspath_all(self, writer_name, writer_kwargs, module):
p1 = tm.ensure_clean('string')
p2 = | tm.ensure_clean('fspath') | pandas.util.testing.ensure_clean |
import spotipy
import pandas as pd
from spotipy.oauth2 import SpotifyClientCredentials
#-- IMPORTANT --#
''' For this script to work, you must have a credentials.py file in the same directory
defining the following variables:
cid = 'YOUR_SPOTIFY_API_CLIENT_ID'
secret = 'YOUR_SPOTIFY_API_CLIENT_SECRET'
'''
from credentials import cid
from credentials import secret
# Spotify API autentication
client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
#-- create new dataframe --#
new_df = pd.DataFrame()
# artist_genre_df = pd.DataFrame()
#-- read dataset --#
song_emotion_df = | pd.read_csv("datasets/tcc_ceds_music.csv", delimiter=',', encoding=None) | pandas.read_csv |
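# A minimal sketch (an assumption, not part of the original script) of how the authenticated
# client might be used to enrich rows of the CSV with Spotify audio features. The column names
# 'track_name' and 'artist_name' are assumptions about the dataset layout.
def fetch_audio_features(track_name, artist_name):
    results = sp.search(q=f"track:{track_name} artist:{artist_name}", type="track", limit=1)
    items = results["tracks"]["items"]
    if not items:
        return None
    # returns danceability, energy, valence, tempo, etc. for the best-matching track
    return sp.audio_features(items[0]["id"])[0]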
#%%
# from libs.Grafana.config import Config
# from libs.Grafana.dbase import Database
import datetime
import pandas as pd
import numpy as np
import datetime
import time
import logging
import pprint
from time import time
import requests
from influxdb import InfluxDBClient
from influxdb.client import InfluxDBClientError
import datetime
import random
import time
import joblib
import numpy as np
import pandas as pd
from sklearn import ensemble, preprocessing
MODEL = joblib.load("../models/tuned-random-forest-regression-model.pkl")
def preprocess_features(df):
_numeric_features = ["GHI(W/m2)",
"mslp(hPa)",
"rain(mm)",
"rh(%)",
"t2(C)",
"td2(C)",
"wind_dir(Deg)",
"wind_speed(m/s)"]
_ordinal_features = ["AOD",
"day",
"month",
"year"]
standard_scalar = preprocessing.StandardScaler()
Z0 = standard_scalar.fit_transform(df.loc[:, _numeric_features])
ordinal_encoder = preprocessing.OrdinalEncoder()
Z1 = ordinal_encoder.fit_transform(df.loc[:, _ordinal_features])
transformed_features = np.hstack((Z0, Z1))
return transformed_features
def feature_engineering(df):
_dropped_cols = ["SWDIR(W/m2)", "SWDNI(W/m2)", "SWDIF(W/m2)"]
_year = (df.index
.year)
_month = (df.index
.month)
_day = (df.index
.dayofyear)
_hour = (df.index
.hour)
features = (df.drop(_dropped_cols, axis=1, inplace=False)
.assign(year=_year, month=_month, day=_day, hour=_hour)
.groupby(["year", "month", "day", "hour"])
.mean()
.unstack(level=["hour"])
.reset_index(inplace=False)
.sort_index(axis=1)
.drop("year", axis=1, inplace=False))
# create the proxy for our solar power target
efficiency_factor = 0.5
target = (features.loc[:, ["GHI(W/m2)"]]
.mul(efficiency_factor)
.shift(-1)
.rename(columns={"GHI(W/m2)": "target(W/m2)"}))
# combine to create the input data
input_data = (features.join(target)
.dropna(how="any", inplace=False)
.sort_index(axis=1))
return input_data
class Database(object):
def __init__(self):
super(Database, self).__init__()
self.logger = logging.getLogger(__name__)
self.INFLUX_DBASE_HOST='172.16.17.32'
self.INFLUX_DBASE_PORT=8086
self.INFLUX_DBASE_NAME='NEOM'
print(f"INFLUX_DBASE_HOST: {str(self.INFLUX_DBASE_HOST)}, INFLUX_DBASE_PORT: {str(self.INFLUX_DBASE_PORT)}, INFLUX_DBASE_NAME: {str(self.INFLUX_DBASE_NAME)}")
        # InfluxDBClient's third positional argument is username, so pass the database name explicitly
        self.client = InfluxDBClient(host=self.INFLUX_DBASE_HOST, port=self.INFLUX_DBASE_PORT, database=self.INFLUX_DBASE_NAME)
self.client.switch_database(self.INFLUX_DBASE_NAME)
self.create()
#---------------------------------------------------------------------------------
self.yesterday = yesterday = datetime.date.today() - datetime.timedelta(days=1)
#Yesterday at midnight
self.yesterday0 = datetime.datetime.combine(self.yesterday, datetime.time.min)
def create(self):
try :
self.client.create_database(self.INFLUX_DBASE_NAME)
except requests.exceptions.ConnectionError as e:
self.logger.warning("CONNECTION ERROR %s" %e)
self.logger.warning("try again")
def log(self,interval, obj,seriesName,value):
records = []
print(interval)
now = self.yesterday0 + datetime.timedelta(minutes=15*interval)
print(now)
if value != None:
try:
floatValue = float(value)
except:
floatValue = None
if floatValue != None:
#---------------------------------------------------------------------------------
record = { "time": now,
"measurement":seriesName,
"tags" : { "object" : obj },
"fields" : { "value" : floatValue },
}
records.append(record)
self.logger.info("writing: %s" % str(records))
try:
res= self.client.write_points(records) # , retention_policy=self.retention_policy)
except requests.exceptions.ConnectionError as e:
self.logger.warning("CONNECTION ERROR %s" %e)
self.logger.warning("try again")
self.create()
#---------------------------------------------------------------------------------
# print (res)
# assert res
def post(self, now, tag_dict, seriesName, value):
records = []
if value != None:
try:
floatValue = float(value)
except:
floatValue = None
if floatValue != None:
                #---------------------------------------------------------------------------------
record = { "time": now ,
"measurement":seriesName,
"tags" : tag_dict,
"fields" : { "value" : floatValue },
}
records.append(record)
self.logger.info("writing: %s" % str(records))
try:
res= self.client.write_points(records) # , retention_policy=self.retention_policy)
except requests.exceptions.ConnectionError as e:
self.logger.warning("CONNECTION ERROR %s" %e)
self.logger.warning("try again")
self.create()
def postArray(self, tag_dict, seriesName, values):
records = []
if values != None:
for row in values:
d = list(row.values())[0]
f = list(row.values())[1]
record = { "time": d ,
"measurement":seriesName,
"tags" : tag_dict,
"fields" : { "value" : f },
}
records.append(record)
self.logger.info("writing: %s" % str(records))
self.logger.info(f"len ======================> {str(len(records))}" )
try:
res= self.client.write_points(records) # , retention_policy=self.retention_policy)
except requests.exceptions.ConnectionError as e:
self.logger.warning("CONNECTION ERROR %s" %e)
self.logger.warning("try again")
self.create()
def __destroy__(self):
self.client.drop_database(self.INFLUX_DBASE_NAME)
#%%
dbase = Database()
#%%
import pyowm
#https://github.com/csparpa/pyowm
#https://pyowm.readthedocs.io/en/latest/usage-examples-v2/weather-api-usage-examples.html#owm-weather-api-version-2-5-usage-examples
owm = pyowm.OWM('68f8c0b152aa2c29c1f6123f3cdb4760') # You MUST provide a valid API key
# Have a pro subscription? Then use:
# owm = pyowm.OWM(API_key='your-API-key', subscription_type='pro')
# Search for current weather in London (Great Britain)
observation = owm.weather_at_place('London,GB')
w = observation.get_weather()
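# A small illustrative follow-up (an assumption, not in the original script): with the pyowm 2.x
# API referenced above, the current readings can be read off the Weather object like this.
temperature = w.get_temperature('celsius')   # e.g. {'temp': ..., 'temp_min': ..., 'temp_max': ...}
humidity = w.get_humidity()
print(f"London now: {temperature.get('temp')} C at {humidity}% relative humidity")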
# NOTE: avoid "from datetime import datetime" here -- it would shadow the datetime module that
# the Database methods above still resolve at call time (datetime.timedelta, datetime.datetime, ...).
now = datetime.datetime.now()
#%%
#%%
# load data
# This is an example of loading today's forecast; we use historic values because real forecasts aren't available here.
# We assume the data would come from a weather API, like the one above.
neom_data = (pd.read_csv("../data/raw/neom-data.csv", parse_dates=[0])
.rename(columns={"Unnamed: 0": "Timestamp"})
.set_index("Timestamp", drop=True, inplace=False))
# perform feature engineering
input_data = feature_engineering(neom_data)
# simulate online learning by sampling features from the input data
_prng = np.random.RandomState(42)
new_features = input_data.sample(n=1, random_state=_prng)
# perform inference
processed_features = preprocess_features(new_features)
predictions = MODEL.predict(processed_features)
# print the total solar power produced
print(predictions)
#%%
dates = pd.date_range(start="2019-09-14", end="2019-09-14 23:00:00", freq='H')
print(dates)
#%%
values = []
for i, d in enumerate(dates):
date = | pd.to_datetime(d) | pandas.to_datetime |
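# A plausible continuation of the loop above (an assumption -- the original is truncated here):
# accumulate one record per hourly forecast value and bulk-write it with the Database helper.
#     values.append({"date": date, "value": float(predictions[0][i])})
# ...and after the loop:
#     dbase.postArray({"object": "neom"}, "solar_power_forecast", values)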
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2d matrix with shape (2, 3) to input. empty - makes sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # every value is masked, so each entry is NaN and no element compares
        # equal to itself
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
            # fill masked values so the combination can be compared as a plain dict
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
                return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
        # without field names, it should fall back to regular DataFrame construction
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
        # round-trip back to records and check the field names
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
assert df.index.name == 'id'
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, | Index([], name='id') | pandas.Index |
import sys,os
import pathlib
import joblib
import pandas as pd
import numpy as np
import spacy
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from pickle import dump, load
import string
def punct_space(token):
"""
helper function to eliminate tokens
that are pure punctuation or whitespace
"""
return token.is_punct or token.is_space
def lemmatize(doc):
"""
    Function that tokenizes the text, lemmatizes it and removes stop words.
"""
nlp = spacy.load('en_core_web_sm')
parsed_doc = nlp(doc)
lemm_doc = [token.lemma_ for token in parsed_doc
if not punct_space(token) and (token.lemma_!= '-PRON-') and not(nlp.vocab[token.text].is_stop)]
# write the transformed text
clean_text = u' '.join(lemm_doc)
return clean_text
def countVec(article):
    """Builds a bag-of-words count matrix for the given collection of articles."""
    cvec = CountVectorizer(stop_words='english', min_df=3)
    cvec.fit(article)
    cvec_counts = cvec.transform(article)
    return cvec_counts
nlp = spacy.load('en_core_web_sm')
def spacy_tokenizer(doc):
"""Function that serves as tokenizer in our pipeline
Loads the 'en_core_web_sm' model, tokenize the string and perform pre processing.
Preprocessing includes lemmatizing tokens as well as removing stop words and punctuations.
Args:
doc(str): sentence to tokenize.
Returns:
list: preprocessed tokens.
"""
punctuations = string.punctuation
stop_words = spacy.lang.en.stop_words.STOP_WORDS
tokens = nlp(doc)
# Lemmatizing each token and converting each token into lowercase
tokens = [word.lemma_.lower() for word in tokens if not word.is_space]
# Removing stop words and punctuations
tokens = [ word for word in tokens if word not in stop_words and word not in punctuations ]
# return preprocessed list of tokens
return tokens
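# A minimal usage sketch of spacy_tokenizer (illustrative only; the exact lemmas
# depend on the installed 'en_core_web_sm' model version, so the output shown is
# an assumption rather than a guaranteed result):
#
#   spacy_tokenizer("The cats were running quickly.")
#   # -> ['cat', 'run', 'quickly']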
class TopicTrain(object):
def __init__(self):
#self.data_path=os.path.join(pathlib.Path().absolute(), data_path)
pass
def _loadArticles(self,path):
"""Loads the articles from each folder inside the path provided. Each folder will represent
the category(label) of the articles.
Args:
path(str): path where the function will find the article subfolders.
Returns:
DataFrame: containing the articles and their category
"""
cat_article = []
for subdir, dirs, files in os.walk(path):
for file in files:
if '.txt' in file:
category = subdir.split('/')[-1]
f = open(os.path.join(subdir, file),'r', encoding='utf-8', errors='ignore')
lines = f.readlines()
lines = ' '.join(lines).replace('\n','')
#list of lists: [category,article]
cat_article.append([category,lines])
f.close()
data = pd.DataFrame(cat_article)
data.columns = ['category','article']
return data
def train(self):
""" Creates a pipeline and trains it. The pipeline contains one preprocessing step
and the trainin of a random forest model.
"""
articles_df = self._loadArticles('data')
#articles_df['article_lemmatized']=articles_df.article.map(lemmatize)
nlp = spacy.load('en_core_web_sm')
text_clf = Pipeline([('tfidf', TfidfVectorizer(tokenizer=spacy_tokenizer,min_df=3)),\
('clf', RandomForestClassifier())])
text_clf.fit(articles_df['article'], articles_df['category'])
model_path = os.path.join(str(pathlib.Path().absolute()), "model")
model_file = model_path + "/rm_tfidf.pkl"
if not os.path.isdir(model_path):
os.makedirs(model_path)
dump(text_clf, open(model_file, 'wb'))
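# Hypothetical end-to-end sketch of the training side. The 'data/<category>/*.txt'
# folder layout and the 'model/rm_tfidf.pkl' output path are taken from the code
# above; running this requires those folders to exist:
#
#   trainer = TopicTrain()
#   trainer.train()  # fits the TF-IDF + random forest pipeline and pickles it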
class TopicPredict(object):
def __init__(self):
try:
sys.path.index(os.path.join(str(pathlib.Path().absolute()), "articles"))
except ValueError:
sys.path.append(os.path.join(str(pathlib.Path().absolute()), "articles"))
model_path = os.path.join(str(pathlib.Path().absolute()), "articles/model")
model_file = model_path + "/rm_tfidf.pkl"
self.model = joblib.load(model_file)
def _important_tokens(self,inp):
""" Creates a dataframe with the top 10 most important tokens of the input article.
The most important features are taken from the random forest model.
Args:
inp(str): article to process.
Returns:
            DataFrame: Top 10 tokens sorted by descending order of importance.
"""
tokens = spacy_tokenizer(inp)
arr = []
for token in tokens:
            # get the index of the token in the TF-IDF vectorizer's vocabulary
            idx = self.model.steps[0][1].vocabulary_.get(token)
            if idx is not None:  # some tokens don't appear in the corpus vocabulary
importance = self.model.steps[1][1].feature_importances_[idx]
arr.append({'TOKEN':token, 'Importance':importance})
imp_df = | pd.DataFrame(arr) | pandas.DataFrame |
# coding: utf-8
# In[ ]:
from __future__ import division
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import OneHotEncoder,LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from scipy import sparse
import xgboost as xgb
import lightgbm as lgb
import cPickle
import time
import datetime
import math
import os
from multiprocessing import cpu_count
import gc
import warnings
warnings.filterwarnings('ignore')
# In[ ]:
# Constant definitions
ROOT_PATH = '/home/kaholiu/xiaoxy/2018-Tencent-Lookalike/'
ONLINE = 0
# In[ ]:
target = 'label'
train_len = 45539700 # 8798814
test1_len = 11729073 # 2265989
test2_len = 11727304 # 2265879
positive_num = 2182403 # 421961
# In[ ]:
########################################### Helper function ###########################################
# In[ ]:
def log(info):
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' ' + str(info))
# In[ ]:
def merge_count(df, columns_groupby, new_column_name, type='uint64'):
add = pd.DataFrame(df.groupby(columns_groupby).size()).reset_index()
add.columns = columns_groupby + [new_column_name]
df = df.merge(add, on=columns_groupby, how="left"); del add; gc.collect()
df[new_column_name] = df[new_column_name].astype(type)
return df
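# Illustrative sketch of merge_count on a toy frame (hypothetical data, not part
# of the competition input):
#
#   toy = pd.DataFrame({'uid': [1, 1, 2]})
#   toy = merge_count(toy, ['uid'], 'count_gb_uid', 'uint32')
#   # toy['count_gb_uid'] -> [2, 2, 1]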
# In[ ]:
def preprocess_word(texts):
pre_texts = []
for text in texts:
words = text.split()
pre_words = []
for word in words:
pre_words.append('W' + word)
pre_text = ' '.join(pre_words)
pre_texts.append(pre_text)
return pre_texts
# In[ ]:
def down_sample(df, df_feat):
df_majority = df_feat[df[target]==0]
df_minority = df_feat[df[target]==1]
df_majority_downsampled = resample(df_majority,
replace=False, # sample without replacement
n_samples=positive_num*3, # to match minority class
random_state=7) # reproducible results
df_downsampled = pd.concat([df_majority_downsampled, df_minority])
del df_majority, df_minority, df_majority_downsampled
return df_downsampled
# In[ ]:
########################################### Read data ###########################################
# In[ ]:
ad_feature = pd.read_csv(ROOT_PATH + 'data/input/final/adFeature.csv', header=0, sep=',')
user_feature = pd.read_csv(ROOT_PATH + 'data/input/final/userFeature.csv', header=0, sep=',')
train = pd.read_csv(ROOT_PATH + 'data/input/final/train.csv', header=0, sep=',')
test1 = pd.read_csv(ROOT_PATH + 'data/input/final/test1.csv', header=0, sep=',')
test2 = pd.read_csv(ROOT_PATH + 'data/input/final/test2.csv', header=0, sep=',')
test = test1.append(test2).reset_index(drop=True); del test1, test2; gc.collect()
df = train.append(test).reset_index(drop=True)
df = df.merge(ad_feature, on='aid', how='left')
df = df.merge(user_feature, on='uid', how='left')
del train, test, ad_feature, user_feature; gc.collect()
df.loc[df[target] == -1, target] = 0
df.loc[train_len:, target] = -1
df[target] = df[target].astype(int)
df = df.fillna('-1')
# In[ ]:
########################################### Preprocess ###########################################
# In[ ]:
onehot_feature = ['LBS', 'age', 'carrier', 'consumptionAbility', 'education', 'gender', 'house', 'os', 'ct', 'advertiserId', 'campaignId', 'creativeId', 'adCategoryId', 'productId', 'productType']
for feature in onehot_feature:
log(feature)
try:
df[feature] = LabelEncoder().fit_transform(df[feature].apply(int))
except:
df[feature] = LabelEncoder().fit_transform(df[feature])
# In[ ]:
vector_feature = ['appIdAction', 'appIdInstall', 'interest1', 'interest2', 'interest3', 'interest4', 'interest5', 'kw1', 'kw2', 'kw3', 'marriageStatus', 'topic1', 'topic2', 'topic3']
for feature in vector_feature:
log(feature)
df[feature] = preprocess_word(df[feature].values)
# In[ ]:
# Save aid+uid+label+creativeSize columns
df_downsampled = down_sample(df, df[['aid', 'uid', 'label', 'creativeSize']])
cPickle.dump(df_downsampled, open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(basic).p', 'wb')); del df_downsampled; gc.collect()
# In[ ]:
########################################### Feature engineer ###########################################
# In[ ]:
log('Before feature engineer')
log('Num of columns: ' + str(len(df.columns)))
log('columns: ' + str(df.columns))
# In[ ]:
# Stat features
predictors_stat1 = []
gb_list = ['uid', 'aid', 'LBS', 'age', 'carrier', 'consumptionAbility', 'education', 'gender', 'house', 'os', 'ct', 'advertiserId', 'campaignId', 'creativeId', 'adCategoryId', 'productId', 'productType', 'marriageStatus']
for i in gb_list:
log(i)
df = merge_count(df, [i], 'count_gb_' + i, 'uint32')
predictors_stat1.append('count_gb_' + i)
gb_list = ['LBS', 'age', 'carrier', 'consumptionAbility', 'education', 'gender', 'house', 'os', 'ct']
for i in gb_list:
log('aid_' + i)
df = merge_count(df, ['aid', i], 'count_gb_aid_' + i, 'uint32')
predictors_stat1.append('count_gb_aid_' + i)
# Save features
df_downsampled = down_sample(df, df[predictors_stat1])
df.drop(predictors_stat1, axis=1, inplace=True); gc.collect()
cPickle.dump(df_downsampled, open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(stat1).p', "wb")); del df_downsampled; gc.collect()
# In[ ]:
# Stat features
predictors_stat2 = []
gb_user = ['uid', 'age', 'LBS']
gb_list = ['advertiserId', 'campaignId', 'creativeId', 'adCategoryId', 'productId', 'productType']
for u in gb_user:
for i in gb_list:
log(u + '_' + i)
df = merge_count(df, [u, i], 'count_gb_%s_%s' % (u, i), 'uint32')
predictors_stat2.append('count_gb_%s_%s' % (u, i))
# Save features
df_downsampled = down_sample(df, df[predictors_stat2])
df.drop(predictors_stat2, axis=1, inplace=True); gc.collect()
cPickle.dump(df_downsampled, open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(stat2).p', "wb")); del df_downsampled; gc.collect()
# In[ ]:
# Stat features
predictors_stat3 = []
vector_feature = ['appIdAction', 'appIdInstall', 'interest1', 'interest2', 'interest3', 'interest4', 'interest5', 'kw1', 'kw2', 'kw3', 'marriageStatus', 'topic1', 'topic2', 'topic3']
for feature in vector_feature:
log(feature)
df['len_' + feature] = [0 if var == 'W-1' else len(var.split()) for var in df[feature].values]
predictors_stat3.append('len_' + feature)
# Save features
df_downsampled = down_sample(df, df[predictors_stat3])
df.drop(predictors_stat3, axis=1, inplace=True); gc.collect()
cPickle.dump(df_downsampled, open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(stat3).p', "wb")); del df_downsampled; gc.collect()
# In[ ]:
# Stat features
df_stat1 = cPickle.load(open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(stat1).p', 'rb'))
df_stat2 = cPickle.load(open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(stat2).p', 'rb'))
df_stat = pd.concat([df_stat1, df_stat2], axis=1); del df_stat1, df_stat2; gc.collect()
predictors_stat4 = []
gb_list = ['LBS', 'age', 'carrier', 'consumptionAbility', 'education', 'gender', 'house', 'os', 'ct']
for i in gb_list:
log('aid_' + i)
df_stat['ratio_aid_%s_to_aid' % i] = df_stat['count_gb_aid_' + i].astype(float) / df_stat['count_gb_aid']
df_stat['ratio_aid_%s_to_%s' % (i, i)] = df_stat['count_gb_aid_' + i].astype(float) / df_stat['count_gb_' + i]
predictors_stat4.append('ratio_aid_%s_to_aid' % i)
predictors_stat4.append('ratio_aid_%s_to_%s' % (i, i))
gb_user = ['uid', 'age', 'LBS']
gb_list = ['advertiserId', 'campaignId', 'creativeId', 'adCategoryId', 'productId', 'productType']
for u in gb_user:
for i in gb_list:
log(u + '_' + i)
df_stat['ratio_%s_%s_to_%s' % (u, i, u)] = df_stat['count_gb_%s_%s' % (u, i)] / df_stat['count_gb_' + u]
df_stat['ratio_%s_%s_to_%s' % (u, i, i)] = df_stat['count_gb_%s_%s' % (u, i)] / df_stat['count_gb_' + i]
predictors_stat4.append('ratio_%s_%s_to_%s' % (u, i, u))
predictors_stat4.append('ratio_%s_%s_to_%s' % (u, i, i))
# Save features
cPickle.dump(df_stat[predictors_stat4], open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(stat4).p', "wb")); del df_stat; gc.collect()
# In[ ]:
gb_list = ['age', 'carrier', 'consumptionAbility', 'education']
predictors_aduser = []
df_aduser = df[[]]
for i in gb_list:
log(i)
column_name = i
df_onehot = pd.get_dummies(df[column_name], prefix=column_name)
df_tmp = df_onehot.groupby(df.aid).transform(np.mean)
df_tmp.columns = [i + '_gb_aid' for i in df_tmp.columns]
df_aduser = pd.concat([df_aduser, df_tmp], axis=1)
predictors_aduser += list(df_tmp.columns.values)
df_tmp = df_tmp.groupby(df.uid).transform(np.mean)
df_tmp.columns = [i + '_gb_uid' for i in df_tmp.columns]
df_aduser = pd.concat([df_aduser, df_tmp], axis=1)
predictors_aduser += list(df_tmp.columns.values)
log(predictors_aduser)
# Save features
df_downsampled = down_sample(df, df_aduser)
cPickle.dump(df_downsampled, open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(aduser1).p', "wb")); del df_downsampled, df_aduser; gc.collect()
# In[ ]:
gb_list = ['gender', 'house', 'os', 'productType'] # 'adCategoryId', 'productId'
predictors_aduser = []
df_aduser = df[[]]
for i in gb_list:
log(i)
column_name = i
df_onehot = pd.get_dummies(df[column_name], prefix=column_name)
df_tmp = df_onehot.groupby(df.aid).transform(np.mean)
df_tmp.columns = [i + '_gb_aid' for i in df_tmp.columns]
df_aduser = pd.concat([df_aduser, df_tmp], axis=1)
predictors_aduser += list(df_tmp.columns.values)
df_tmp = df_tmp.groupby(df.uid).transform(np.mean)
df_tmp.columns = [i + '_gb_uid' for i in df_tmp.columns]
df_aduser = pd.concat([df_aduser, df_tmp], axis=1)
predictors_aduser += list(df_tmp.columns.values)
log(predictors_aduser)
# Save features
df_downsampled = down_sample(df, df_aduser)
cPickle.dump(df_downsampled, open(ROOT_PATH + 'data/output/lgb/final/feat/train+test1+test2/all(aduser2).p', "wb")); del df_downsampled, df_aduser; gc.collect()
# In[ ]:
gb_list = ['age', 'carrier', 'consumptionAbility', 'education']
predictors_userad = []
df_userad = df[[]]
for i in gb_list:
log(i)
column_name = i
df_onehot = pd.get_dummies(df[column_name], prefix=column_name)
# -*- coding: utf-8 -*-
import base64
import logging
from pathlib import Path
from zipfile import ZipFile
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image
def create_dataframe(tissue_final: float, fibrosis_final: float, csv_filename: str) -> None:
data = [[tissue_final, fibrosis_final]]
df = pd.DataFrame(data, columns=["tissue_percentage", "fibrosis_percentage"])
# RHR Online Anomaly Detection & Alert Monitoring
######################################################
# Author: <NAME> #
# Email: <EMAIL> #
# Location: Dept.of Genetics, Stanford University #
# Date: Oct 29 2020 #
######################################################
# uses raw heart rate and steps data (this steps data doesn't have zeroes and needs to be inferred from the hr datetime stamp)
## simple command
# python rhrad_online_alerts.py --heart_rate hr.csv --steps steps.csv
## full command
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv --steps pbb_fitbit_oldProtocol_steps.csv --myphd_id pbb_RHR_online --figure1 pbb_RHR_online_anomalies.pdf --anomalies pbb_RHR_online_anomalies.csv --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 --outliers_fraction 0.1 --random_seed 10 --baseline_window 744 --sliding_window 1 --alerts pbb_RHR_online_alerts.csv --figure2 pbb_RHR_online_alerts.pdf
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv \
# --steps pbb_fitbit_oldProtocol_steps.csv \
# --myphd_id pbb_RHR_online \
# --figure1 pbb_RHR_online_anomalies.pdf \
# --anomalies pbb_RHR_online_anomalies.csv \
# --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 \
# --outliers_fraction 0.1 \
# --random_seed 10 \
# --baseline_window 744 --sliding_window 1
# --alerts pbb_RHR_online_alerts.csv \
# --figure2 pbb_RHR_online_alerts.pdf
import warnings
warnings.filterwarnings('ignore')
import sys
import argparse
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#%matplotlib inline
import seaborn as sns
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
####################################
parser = argparse.ArgumentParser(description='Find anomalies in wearables time-series data.')
parser.add_argument('--heart_rate', metavar='', help ='raw heart rate count with a header = heartrate')
parser.add_argument('--steps',metavar='', help ='raw steps count with a header = steps')
parser.add_argument('--myphd_id',metavar='', default = 'myphd_id', help ='user myphd_id')
parser.add_argument('--anomalies', metavar='', default = 'myphd_id_anomalies.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure1', metavar='', default = 'myphd_id_anomalies.pdf', help='save predicted anomalies as a PDF file')
parser.add_argument('--symptom_date', metavar='', default = 'NaN', help = 'symptom date with y-m-d format')
parser.add_argument('--diagnosis_date', metavar='', default = 'NaN', help='diagnosis date with y-m-d format')
parser.add_argument('--outliers_fraction', metavar='', type=float, default=0.1, help='fraction of outliers or anomalies')
parser.add_argument('--random_seed', metavar='', type=int, default=10, help='random seed')
parser.add_argument('--baseline_window', metavar='',type=int, default=744, help='baseline window is used for training (in hours)')
parser.add_argument('--sliding_window', metavar='',type=int, default=1, help='sliding window is used to slide the testing process each hour')
parser.add_argument('--alerts', metavar='', default = 'myphd_id_alerts.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure2', metavar='', default = 'myphd_id_alerts.pdf', help='save predicted anomalies as a PDF file')
args = parser.parse_args()
# as arguments
fitbit_oldProtocol_hr = args.heart_rate
fitbit_oldProtocol_steps = args.steps
myphd_id = args.myphd_id
myphd_id_anomalies = args.anomalies
myphd_id_figure1 = args.figure1
symptom_date = args.symptom_date
diagnosis_date = args.diagnosis_date
RANDOM_SEED = args.random_seed
outliers_fraction = args.outliers_fraction
baseline_window = args.baseline_window
sliding_window = args.sliding_window
myphd_id_alerts = args.alerts
myphd_id_figure2 = args.figure2
####################################
class RHRAD_online:
# Infer resting heart rate ------------------------------------------------------
def resting_heart_rate(self, heartrate, steps):
"""
This function uses heart rate and steps data to infer resting heart rate.
It filters the heart rate with steps that are zero and also 12 minutes ahead.
"""
# heart rate data
df_hr = pd.read_csv(fitbit_oldProtocol_hr)
df_hr = df_hr.set_index('datetime')
df_hr.index.name = None
df_hr.index = pd.to_datetime(df_hr.index)
# steps data
df_steps = pd.read_csv(fitbit_oldProtocol_steps)
df_steps = df_steps.set_index('datetime')
df_steps.index.name = None
df_steps.index = pd.to_datetime(df_steps.index)
# merge dataframes
#df_hr = df_hr.resample('1min').mean()
#df_steps = df_steps.resample('1min').mean()
# added "outer" paramter for merge function to adjust the script to the new steps format
#df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True)
df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True, how="outer")
df1 = df1[pd.isnull(df1).any(axis=1)].fillna(0)
df1 = df1.rename(columns={"value_x": "heartrate", "value_y": "steps"})
df1 = df1.resample('1min').mean()
print(myphd_id)
print("Data size (in miutes) before removing missing data")
print(df1.shape)
ax = df1.plot(figsize=(20,4), title=myphd_id)
ax.figure.savefig(myphd_id+'_data.png')
#print(df1)
df1 = df1.dropna(how='any')
df1 = df1.loc[df1['heartrate']!=0]
print("Data size (in miutes) after removing missing data")
print(df1.shape)
#print(df1)
# define RHR as the HR measurements recorded when there were less than two steps taken during a rolling time window of the preceding 12 minutes (including the current minute)
df1['steps'] = df1['steps'].apply(np.int64)
df1['steps_window_12'] = df1['steps'].rolling(12).sum()
df1 = df1.loc[(df1['steps_window_12'] == 0 )]
print(df1['heartrate'].describe())
print(df1['steps_window_12'].describe())
# impute missing data
#df1 = df1.resample('1min').mean()
#df1 = df1.ffill()
print("No.of timesteps for RHR (in minutes)")
print(df1.shape)
return df1
# Pre-processing ------------------------------------------------------
def pre_processing(self, resting_heart_rate):
"""
This function takes resting heart rate data and applies moving averages to smooth the data and
downsamples to one hour by taking the average values
"""
# smooth data
df_nonas = df1.dropna()
df1_rom = df_nonas.rolling(400).mean()
# resample
df1_resmp = df1_rom.resample('1H').mean()
df2 = df1_resmp.drop(['steps'], axis=1)
df2 = df2.dropna()
print("No.of timesteps for RHR (in hours)")
print(df2.shape)
return df2
# Seasonality correction ------------------------------------------------------
def seasonality_correction(self, resting_heart_rate, steps):
"""
This function takes the output of pre-processing and applies seasonality correction
"""
sdHR_decomposition = seasonal_decompose(sdHR, model='additive', freq=1)
sdSteps_decomposition = seasonal_decompose(sdSteps, model='additive', freq=1)
sdHR_decomp = pd.DataFrame(sdHR_decomposition.resid + sdHR_decomposition.trend)
sdHR_decomp.rename(columns={sdHR_decomp.columns[0]:'heartrate'}, inplace=True)
sdSteps_decomp = pd.DataFrame(sdSteps_decomposition.resid + sdSteps_decomposition.trend)
sdSteps_decomp.rename(columns={sdSteps_decomp.columns[0]:'steps_window_12'}, inplace=True)
frames = [sdHR_decomp, sdSteps_decomp]
data = pd.concat(frames, axis=1)
#print(data)
#print(data.shape)
return data
# Train model and predict anomalies ------------------------------------------------------
def online_anomaly_detection(self, data_seasnCorec, baseline_window, sliding_window):
"""
# split the data, standardize the data inside a sliding window
# parameters - 1 month baseline window and 1 hour sliding window
# fit the model and predict the test set
"""
for i in range(baseline_window, len(data_seasnCorec)):
data_train_w = data_seasnCorec[i-baseline_window:i]
# train data normalization ------------------------------------------------------
data_train_w += 0.1
standardizer = StandardScaler().fit(data_train_w.values)
data_train_scaled = standardizer.transform(data_train_w.values)
data_train_scaled_features = pd.DataFrame(data_train_scaled, index=data_train_w.index, columns=data_train_w.columns)
data = pd.DataFrame(data_train_scaled_features)
data_1 = pd.DataFrame(data).fillna(0)
data_1['steps'] = '0'
data_1['steps_window_12'] = (data_1['steps'])
data_train_w = data_1
data_train.append(data_train_w)
data_test_w = data_seasnCorec[i:i+sliding_window]
# test data normalization ------------------------------------------------------
data_test_w += 0.1
data_test_scaled = standardizer.transform(data_test_w.values)
data_scaled_features = pd.DataFrame(data_test_scaled, index=data_test_w.index, columns=data_test_w.columns)
data = pd.DataFrame(data_scaled_features)
data_1 = pd.DataFrame(data).fillna(0)
data_1['steps'] = '0'
data_1['steps_window_12'] = (data_1['steps'])
data_test_w = data_1
data_test.append(data_test_w)
# fit the model ------------------------------------------------------
model = EllipticEnvelope(random_state=RANDOM_SEED,
support_fraction=0.7,
contamination=outliers_fraction).fit(data_train_w)
# predict the test set
preds = model.predict(data_test_w)
#preds = preds.rename(lambda x: 'anomaly' if x == 0 else x, axis=1)
dfs.append(preds)
# Merge predictions ------------------------------------------------------
def merge_test_results(self, data_test):
"""
Merge predictions
"""
# concat all test data (from sliding window) with their datetime index and others
data_test = pd.concat(data_test)
# merge predicted anomalies from test data with their corresponding index and other features
preds = pd.DataFrame(dfs)
preds = preds.rename(lambda x: 'anomaly' if x == 0 else x, axis=1)
data_test_df = pd.DataFrame(data_test)
data_test_df = data_test_df.reset_index()
data_test_preds = data_test_df.join(preds)
return data_test_preds
# Positive Anomalies -----------------------------------------------------------------
"""
Selects anomalies in positive direction and saves in a CSV file
"""
def positive_anomalies(self, data):
a = data.loc[data['anomaly'] == -1, ('index', 'heartrate')]
positive_anomalies = a[(a['heartrate']> 0)]
# Anomaly results
positive_anomalies['Anomalies'] = myphd_id
positive_anomalies.columns = ['datetime', 'std.rhr', 'name']
positive_anomalies.to_csv(myphd_id_anomalies, header=True)
return positive_anomalies
# Alerts ------------------------------------------------------
def create_alerts(self, anomalies, data, fitbit_oldProtocol_hr):
"""
# creates alerts every 24 hours and sends them at 9 PM.
# visualise alerts
"""
# function to assign different alert names
# summarize hourly alerts
def alert_types(alert):
if alert['alerts'] >=6:
return 'RED'
elif alert['alerts'] >=1:
return 'YELLOW'
else:
return 'GREEN'
# summarize hourly alerts
#anomalies.columns = ['datetime', 'std.rhr', 'name']
anomalies = anomalies[['datetime']]
anomalies['datetime'] = pd.to_datetime(anomalies['datetime'], errors='coerce')
anomalies['alerts'] = 1
anomalies = anomalies.set_index('datetime')
anomalies = anomalies[~anomalies.index.duplicated(keep='first')]
anomalies = anomalies.sort_index()
alerts = anomalies.groupby(pd.Grouper(freq = '24H', base=21)).cumsum()
# apply alert_types function
alerts['alert_type'] = alerts.apply(alert_types, axis=1)
alerts_reset = alerts.reset_index()
#print(alerts_reset)
# save alerts
#alerts.to_csv(myphd_id_alerts, mode='a', header=True)
# summarize hourly alerts to daily alerts
daily_alerts = alerts_reset.resample('24H', on='datetime', base=21, label='right').count()
daily_alerts = daily_alerts.drop(['datetime'], axis=1)
#print(daily_alerts)
# function to assign different alert names
def alert_types(alert):
if alert['alert_type'] >=6:
return 'RED'
elif alert['alert_type'] >=1:
return 'YELLOW'
else:
return 'GREEN'
# apply alert_types function
daily_alerts['alert_type'] = daily_alerts.apply(alert_types, axis=1)
# merge missing 'datetime' with 'alerts' as zero aka GREEN
data1 = data[['index']]
data1['alert_type'] = 0
data1 = data1.rename(columns={"index": "datetime"})
data1['datetime'] = pd.to_datetime(data1['datetime'], errors='coerce')
data1 = data1.resample('24H', on='datetime', base=21, label='right').count()
data1 = data1.drop(data1.columns[[0,1]], axis=1)
data1 = data1.reset_index()
data1['alert_type'] = 0
data3 = pd.merge(data1, daily_alerts, on='datetime', how='outer')
data4 = data3[['datetime', 'alert_type_y']]
data4 = data4.rename(columns={ "alert_type_y": "alert_type"})
daily_alerts = data4.fillna("GREEN")
daily_alerts = daily_alerts.set_index('datetime')
daily_alerts = daily_alerts.sort_index()
# merge alerts with main data and pass 'NA' when there is a missing day instead of 'GREEN'
df_hr = pd.read_csv(fitbit_oldProtocol_hr)
df_hr['datetime'] = pd.to_datetime(df_hr['datetime'], errors='coerce')
df_hr = df_hr.resample('24H', on='datetime', base=21, label='right').mean()
df_hr = df_hr.reset_index()
df_hr = df_hr.set_index('datetime')
df_hr.index.name = None
df_hr.index = pd.to_datetime(df_hr.index)
### HI_Waterbird_Repro_DataJoinMerge_v3.py
### Version: 5/7/2020
### Author: <NAME>, <EMAIL>, (503) 231-6839
### Abstract: This Python 3 script pulls data from the HI Waterbirds Reproductive Success ArcGIS Online feature service and performs joins and merges to result in a combined CSV dataset.
import arcpy
import pandas as pd
from arcgis import GIS
import time, os, fnmatch, shutil
### ArcGIS Online stores date-time information in UTC by default. This function converts time zones and can be used to convert from UTC ("UTC") to Hawaii standard time ("US/Hawaii"; UTC -10).
from datetime import datetime
from pytz import timezone
def change_timezone_of_field(df, source_date_time_field, new_date_time_field, source_timezone, new_timezone):
"""Returns the values in *source_date_time_field* with its timezone converted to a new timezone within a new field *new_date_time_field*
: param df: The name of the spatially enabled or pandas DataFrame containing datetime fields
: param source_date_time_field: The name of the datetime field whose timezone is to be changed
: param new_date_time_field: The name of the new datetime field
: param source_timezone: The name of the source timezone
: param new_timezone: The name of the converted timezone. For possible values, see https://gist.github.com/heyalexej/8bf688fd67d7199be4a1682b3eec7568
"""
# Define the source timezone in the source_date_time_field
df[source_date_time_field] = df[source_date_time_field].dt.tz_localize(source_timezone)
# Convert the datetime in the source_date_time_field to the new timezone in a new field called new_date_time_field
df[new_date_time_field] = df[source_date_time_field].dt.tz_convert(new_timezone)
### Allow authentication via login to U.S. Fish & Wildlife Service ArcGIS Online account via ArcGIS Pro
gis = GIS("pro")
### Enter path for local file saving
# uncomment next line to use ArcGIS interface, otherwise hard coding out_workspace
out_workspace = arcpy.GetParameterAsText(0)
# out_workspace = "C:/Users/kso/Desktop/"
### Paths to ArcGIS Online data
# To populate Service ItemId, go to Feature Service webpage and in bottom right corner, click on the View link.
# Current Feature Service webpage: https://fws.maps.arcgis.com/home/item.html?id=55275a4a0dc54c1c8dcab604b65a88f0
ServiceItemID = gis.content.get("55275a4a0dc54c1c8dcab604b65a88f0")
### There are separate methods for pulling spatial versus non-spatial data into Python. Spatial layers will become Spatially Enabled DataFrame objects. Non-spatial data will become regular pandas DataFrame objects.
## Define variables pointing to spatial layers
NestLocationLyr = ServiceItemID.layers[0]
BroodLocationLyr = ServiceItemID.layers[1]
CountUnitsLyr = ServiceItemID.layers[2]
## Create Spatially Enabled DataFrame objects
sedfNestLocation = pd.DataFrame.spatial.from_layer(NestLocationLyr)
sedfBroodLocation = pd.DataFrame.spatial.from_layer(BroodLocationLyr)
sedfCountUnits = pd.DataFrame.spatial.from_layer(CountUnitsLyr)
## Define variables point to non-spatial (tabular) data
NestVisitData = r"https://services.arcgis.com/QVENGdaPbd4LUkLV/arcgis/rest/services/Reproductive_Success_Survey_BETA/FeatureServer/6"
## Convert AGOL table to NumPy Array and then to pandas DataFrames
naNestVisitData = arcpy.da.TableToNumPyArray(NestVisitData,["OBJECTID","Date","ObserverName","NestCode","NumEggsObservedText","WaterLevel","Status","FailureCause","FailureComments","Bands","NestComments","GlobalID","NestLocationGlobalID","created_user","created_date","last_edited_user","last_edited_date"])
dfNestVisitData = pd.DataFrame(naNestVisitData)
import enum
import json
from glob import glob
from typing import Dict, List, Tuple
import re
import datetime as dt
from collections import Counter
import os
from numpy.random.mtrand import sample
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
#TODO cant import rn...
from transformers import AutoTokenizer, AutoModelForMaskedLM, AutoConfig
RAW_BASE_PATH = "../data/raw/"
ADMISSIONS_FNAME = "ADMISSIONS.csv.gz"
DIAGNOSES_FNAME = "DIAGNOSES_ICD.csv.gz"
LABEVENTS_FNAME = "LABEVENTS.csv.gz"
PRESCRIPTIONS_FNAME = "PRESCRIPTIONS.csv.gz"
PATIENTS_FNAME = "PATIENTS.csv.gz"
PATIENTS_PATH = os.path.join(RAW_BASE_PATH, PATIENTS_FNAME)
PATH_PROCESSED = "../data/processed/"
NOTES_PATH = os.path.join(PATH_PROCESSED, 'SAMPLE_NOTES.csv')
EMBEDDINGS_BASE_PATH = '../data/embeddings/'
SENTENCE_TENSOR_PATH = "../data/embeddings/99283.pt"
EMBEDDING_TEMPLATE = "../data/embeddings/{subj_id}.pt"
PRETRAINED_MODEL_PATH = 'deepset/covid_bert_base'
SAMPLE_SIZE = 10000
RANDOM_SEED = 1
TRAIN_SIZE = 0.8
# We need to take into account only the events that happened during the observation window. The end of observation window is N days before death for deceased patients and date of last event for alive patients. We can have several sets of events (e.g. labs, diags, meds), so we need to choose the latest date out of those.
OBSERVATION_WINDOW = 2000
PREDICTION_WINDOW = 50
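# A minimal sketch of the cutoff rule described above (an assumption, not part of the
# original pipeline): the helper name, and the use of PREDICTION_WINDOW as the
# "N days before death" offset, are illustrative only.
def observation_window_end(last_event_date, death_date=None):
    """Return the assumed end of the observation window for one patient."""
    if death_date is not None:
        return death_date - pd.Timedelta(days=PREDICTION_WINDOW)
    return last_event_date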
class ModelType(enum.Enum):
Baseline = 'Baseline'
TF_IDF = 'TF_IDF'
Embeddings = 'Embeddings'
######################### ML STUFF
# common ML
def get_training_and_target(deceased_to_date: pd.Series, *feats_to_train_on: List[pd.DataFrame], is_baseline = False, improved_df = None) -> Tuple[pd.DataFrame, pd.Series]:
feats_to_train_on = [*feats_to_train_on]
if is_baseline:
df_final = pd.concat(feats_to_train_on, axis=1).fillna(0)
target = pd.Series(df_final.index.isin(deceased_to_date), index=df_final.index, name='target')
return df_final, target
if improved_df is None:
raise ValueError('should specify @improved_df if this is not a baseline model')
df_final = pd.concat(feats_to_train_on, axis=1).fillna(0)
improved_df = improved_df[improved_df.index.isin(df_final.index)]
feats_to_train_on = [*feats_to_train_on, improved_df]
df_final = pd.concat(feats_to_train_on, axis=1).fillna(0)
target = pd.Series(df_final.index.isin(deceased_to_date), index=df_final.index, name='target')
return df_final, target
def _train_and_predict(df: pd.DataFrame, target: pd.Series, train_loc: pd.Series, classifier) -> np.array:
classifier.fit(df[train_loc], target[train_loc])
pred = classifier.predict_proba(df[~train_loc])[:, 1]
return pred
def train_cl_model(model_type: ModelType, df: pd.DataFrame, train_ids: list, target: pd.Series) -> None:
train_loc = df.index.isin(train_ids)
cl = RandomForestClassifier(random_state=RANDOM_SEED)
pred = _train_and_predict(df, target, train_loc, cl)
print(f'Roc score RandomForestClassifier {model_type.value}: {roc_auc_score(target[~train_loc], pred)}')
feature_importances = pd.Series(cl.feature_importances_, index=df.columns).sort_values(ascending=False).iloc[:10]
print(f'Feature importances {model_type.value}: {feature_importances}\n')
# embedding ML
# TODO refactor this to work on batches of notes
def get_vector_for_text(text: str, tokenizer: AutoTokenizer, model: AutoModelForMaskedLM) -> torch.Tensor:
"""This is ugly and slow."""
encoding = tokenizer(text,
add_special_tokens=True,
truncation=True,
padding="max_length",
return_attention_mask=True,
return_tensors="pt")
with torch.no_grad():
outputs = model(**encoding)
hs = outputs.hidden_states
token_embeddings = torch.stack(hs, dim=0)
token_embeddings = torch.squeeze(token_embeddings, dim=1)
token_embeddings = token_embeddings.permute(1,0,2)
token_vecs = hs[-2][0]
text_embedding = torch.mean(token_vecs, dim=0)
return text_embedding
def save_embedding(last_note, tokenizer: AutoTokenizer, model: AutoModelForMaskedLM) -> None:
for row_num, row in tqdm(last_note.iloc[0:].iterrows()):
text = row['TO_TOK']
subj_id = row['SUBJECT_ID']
embedding = get_vector_for_text(text, tokenizer, model)
torch.save(embedding, EMBEDDING_TEMPLATE.format(subj_id=subj_id))
def get_saved_embeddings() -> Tuple[List[int], List[np.array]]:
subj_ids = []
embeddings = []
for file in tqdm(glob(os.path.join(EMBEDDINGS_BASE_PATH, '*'))):
name = file.split('/')[-1]
subj_id = int(name.split('.')[0])
embedding = torch.load(file)
subj_ids.append(subj_id)
embeddings.append(np.array(embedding))
return subj_ids, embeddings
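# Illustrative usage (assumes embeddings were previously written by save_embedding):
# subj_ids, embeddings = get_saved_embeddings()
# emb_df = pd.DataFrame(embeddings, index=subj_ids)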
#################
def get_patient_sample() -> Tuple[set, pd.Series, pd.Series]:
patients = pd.read_csv(PATIENTS_PATH)
#sampling random patients
patients_sample = patients.sample(n=1000, random_state=RANDOM_SEED)
sample_ids = set(patients_sample.SUBJECT_ID)
# TODO why read-write?
with open(os.path.join(PATH_PROCESSED, "SAMPLE_IDS.json"), 'w') as f:
json.dump({'ids': list(sample_ids)}, f)
with open(os.path.join(PATH_PROCESSED, "SAMPLE_IDS.json"), 'r') as f:
sample_ids = set(json.load(f)['ids'])
patients_sample = patients[patients.SUBJECT_ID.isin(sample_ids)]
# Moratality set
deceased_to_date = patients_sample[patients_sample.EXPIRE_FLAG == 1] \
.set_index('SUBJECT_ID').DOD.map(lambda x: pd.to_datetime(x).date()).to_dict()
return sample_ids, patients_sample, deceased_to_date
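# Illustrative usage:
# sample_ids, patients_sample, deceased_to_date = get_patient_sample()
# admissions = get_data_for_sample(sample_ids, ADMISSIONS_FNAME)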
## TODO Feature engr. helpers
def get_data_for_sample(sample_ids: set,
file_name: str,
chunksize: int = 10_000) -> pd.DataFrame:
"""Get the data only relevant for the sample."""
full_path = os.path.join(RAW_BASE_PATH, file_name)
iterator = pd.read_csv(full_path, iterator=True, chunksize=chunksize)
return pd.concat([chunk[chunk.SUBJECT_ID.isin(sample_ids)] for chunk in tqdm(iterator)])
def find_mean_dose(dose: str) -> float:
if pd.isnull(dose):
import pandas as pd
import datetime as dt
from ._db_data import DBData
class RDA(DBData):
"""A class that contains all the Rapid Diagnostic Analytics tests"""
def __init__(self):
super().__init__()
db_obj = DBData()
# assign class variables
self.df_ta = db_obj.retrieve_data('cln_shift')
self.df_pa = db_obj.retrieve_data('cln_payroll')
self.df_master = db_obj.retrieve_data('cln_emp_master')
def test_1(self):
"""Payslips: Annualised salaries - Detect indicators of employee on annualised salary & compare to \
“like for like” employees paid hourly rates."""
# Aggregate salaried employees net income and hours worked
# Find a like-for-like hourly worker, multiply that hourly rate by the salaried employee's total hours and compare the difference.
# I've got employee master data in the folder.
# Brining in payroll dataset & master dataset
df_pa = self.df_pa.__deepcopy__()
df_master = self.df_master.__deepcopy__()
# Creating a list of just Salary employees
salary_emp = list(df_pa['emp_id'].loc[df_pa['pay_type']=='SAL'])
# Removing duplicates from these emp_ids
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
return final_list
salary_emp = Remove(salary_emp)
# filtering the payroll data to include just salaried employees
df_sal_emp = df_pa.loc[df_pa['emp_id'].isin(salary_emp)]
# Filtering to remove all pay_types said to be excluded from the dataset
def sal_groupby(df_sal_emp):
df_sal_emp_exc = df_sal_emp.loc[df_sal_emp['mapping_inclusion'] != 'Exclude']
# Aggregating by emp_id to give total pay and total hours
agg_df_sal_emp = df_sal_emp_exc.groupby(['emp_id', 'position_name', 'level', 'employment_status', 'venue']).agg(
total_pay=pd.NamedAgg(column='period_amount', aggfunc=sum),
total_hours=pd.NamedAgg(column='hours_for_period', aggfunc=sum)).reset_index()
# Adding in the amount per hour worked
agg_df_sal_emp['amount_per_hour'] = agg_df_sal_emp['total_pay'] / agg_df_sal_emp['total_hours']
return agg_df_sal_emp
# Group by for salaried employees
agg_df_sal_emp = sal_groupby(df_pa.loc[df_pa['emp_id'].isin(salary_emp)])
# Adding a dummy key to show emp is salary
agg_df_sal_emp['is_emp_sal'] = 1
# Group by for non salaried employees
agg_df_non_sal_emp = sal_groupby(df_pa.loc[~df_pa['emp_id'].isin(salary_emp)])
# Adding a dummy key to show emp is NOT salary
agg_df_non_sal_emp['is_emp_sal'] = 0
# Aggregating together
agg_df_results = agg_df_sal_emp.append(agg_df_non_sal_emp)
# Returning converted to dict
return agg_df_results.to_dict(orient='list')
def test_2(self):
"""Payslips: “Fully loaded” flat rates - Detect indicators of employee on loaded flat rates & compare to \
“like for like” employees paid hourly rates."""
# For rockpool we dont have this!
pass
def test_3(self):
"""Payslips: Allowance consumption - Look for “like for like” employment and assess consistency of pay element \
consumption across the population."""
# within the payroll data key we have a flag for allowances
# Sum the allowances for each employee across the entire period
# Give a total of the hours work for period number of units of allowance awarded to them
# Brining in payroll dataset.
df_pa = self.df_pa.__deepcopy__()
# Filtering for just the allowances
df_pa = df_pa.loc[df_pa['is_allowance'] == 'y']
# aggregating over emp_id
allowance_agg_df = df_pa.groupby(['emp_id', 'position_name', 'mapping_codes', 'mapping_description']).agg(
total_allowance_paid=pd.NamedAgg(column='period_amount', aggfunc=sum),
total_allowance_hours=pd.NamedAgg(column='hours_for_period', aggfunc=sum)).reset_index()
return allowance_agg_df.to_dict(orient='list')
def test_4(self):
"""Payslips: Inaccurate classification or inconsistent rates - Look for “like for like” employment and \
determine deviation from mode rates paid at classification."""
# Group role, hr rate and count of all employees across data set.
# e.g. if we have a cook who is being paid differently from all the others.
# Brining in payroll dataset.
df_pa = self.df_pa.__deepcopy__()
# Filtering for just the includes work codes as given by the rockpool logic
df_pa_inc = df_pa.loc[df_pa['mapping_inclusion'] != 'Exclude']
# Aggregating results.
df_pa_inc_agg = df_pa_inc.groupby(['emp_id', 'position_name']).agg(
total_pay=pd.NamedAgg(column='period_amount', aggfunc=sum),
total_hours=pd.NamedAgg(column='hours_for_period', aggfunc=sum)).reset_index()
# Adding in the amount per hour worked
df_pa_inc_agg['amount_per_hour'] = df_pa_inc_agg['total_pay'] / df_pa_inc_agg['total_hours']
return df_pa_inc_agg.to_dict(orient='list')
def test_5(self):
"""Payslips: Superannuation configuration and interpretation - Independent projection of super contributions \
and compare to actual payments. Challenge interpretations."""
# Map which payments should have super
# and then reconcile it to actual super payments
# However Rockpool doesn't have super in their data, so this can't be done.
pass
def test_6(self):
"""Time & attendance: Employee “casualness” - Determine the regularity of employee working patterns rate an \
employee’s likelihood to be casual/non-casual."""
# Layout: if employees are working the same rough hours on each day consistently.
weekday = ['Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
df_ta = self.df_ta.__deepcopy__()
df_ta['shift_date'] = df_ta['shift_start'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))
# Calculating the length of the shift in minutes
df_ta['shift_len_mins'] = (df_ta['shift_end'] - df_ta['shift_start']).dt. \
total_seconds().div(60).astype(int)
# The day of the week with Monday=0, Sunday=6. I have changed to str for analysis
df_ta['day_of_week'] = df_ta['shift_start'].dt.dayofweek.astype(int).apply(lambda x: weekday[x])
# Dummy to show if shift starts in AM or PM
df_ta['am'] = df_ta['shift_start'].apply(lambda x: 'am' if x.time() < dt.time(12) else 'pm')
# creating a concat to show day and AM or PM of shift
df_ta['shift_overview'] = df_ta['day_of_week'] + '_' + df_ta['am']
# Creating concat to feed into remove duplicates to get rid of split shifts per date and AM or PM, e.g.
# Someone works two PM shifts
df_ta['emp_shift_date_am_pm'] = df_ta['emp_id'] + '_' +\
df_ta['shift_date'] + \
'_' + df_ta['am']
# Taking a snap shot of df_ta to be returned before deduplication to give a calendar heat map.
# df_ta['shift_start'] = df_ta['shift_start'].apply(lambda x: x.strftime('%d/%-m/%y' '%H:%M:%S'))
# df_ta['shift_end'] = df_ta['shift_end'].apply(lambda x: x.strftime('%d/%-m/%y' '%H:%M:%S'))
cal_heat_map = df_ta[:]
return cal_heat_map.to_dict()
def test_7(self, shift_duration_hrs, min_break_duration_mins):
"""Time & attendance: Rest and meal breaks - Analyse shift patterns and timing and length of breaks across \
employee cohorts to find potentially missing entitlements."""
# If employees are taking the required break each shift
# with tsid_start_date being the same day the break is calculated by the time between the end of the last shift
# and the start of the next shift.
# two parameters: length of shift worked, length of break
# Output - which employees aren't taking the breaks
df_ta = self.df_ta.__deepcopy__()
# Creating the shift_date column for anlysis
df_ta['shift_date'] = df_ta['shift_start'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))
# Sort by emp_id, shift date, shift start time
df_ta = df_ta.sort_values(by=['emp_id', 'shift_start'], ascending=True)
# Get shift start and end time for each employee on each day
shifts = df_ta.groupby(['emp_id', 'shift_date']).agg({'shift_start': 'min', 'shift_end': 'max'})
shifts.columns = ['min_shift_start', 'max_shift_end']
shifts = shifts.reset_index()
shifts['max_shift_end'] = pd.to_datetime(shifts['max_shift_end'])
shifts['min_shift_start'] = pd.to_datetime(shifts['min_shift_start'])
# Get net shift duration
shifts['shift_dur'] = shifts['max_shift_end'] - shifts['min_shift_start'] # 'timedelta' shift duration
shifts['shift_dur'] = shifts['shift_dur'].dt.total_seconds().div(3600) # convert timedelta to hours float
# Flag if employee was entitled to a break (shift length >= 6 hours)
shifts['break_flag'] = 0
shifts.loc[shifts['shift_dur'] >= shift_duration_hrs, 'break_flag'] = 1
# Merge shift duration and break flag with df_ta
merged_df = df_ta.merge(shifts, how='left', on=['emp_id', 'shift_date'])
# print(results) # test for faulty date
# Append row-shifted columns 'emp_id', 'shift_date' and 'shift_start_time' to calculate break duration
merged_df['next_emp_id'] = merged_df['emp_id'].shift(periods=-1).fillna('') # collect next row's emp_id
merged_df['next_shift_date'] = merged_df['shift_date'].shift(periods=-1).fillna(
pd.to_datetime('1900-01-01 00:00:00')) # collect next rows' shift date
merged_df['next_shift_start_time'] = merged_df['shift_start'].shift(periods=-1).fillna(
pd.to_datetime('1900-01-01 00:00:00')) # collect next row's start time
# Check using above if the next row is part of the same shift (same emp id and shift start date)
merged_df['next_shift_flag'] = 0
merged_df.loc[(merged_df['emp_id'] == merged_df['next_emp_id']) &
(merged_df['shift_date'] == merged_df['next_shift_date']),
'next_shift_flag'] = 1 # flag if same shift
merged_df['next_shift_start_time'] = pd.to_datetime(merged_df['next_shift_start_time'])
merged_df['shift_end'] = pd.to_datetime(merged_df['shift_end'])
# If both id and shift match, then calculate timedelta
merged_df.loc[merged_df['next_shift_flag'] == 1, 'break_dur'] = merged_df['next_shift_start_time'] - \
merged_df['shift_end']
merged_df.loc[merged_df['break_dur'].isnull(), 'break_dur'] = pd.to_timedelta(
0) # replace null with 0 timedelta
# convert timedelta to integer minutes
merged_df['break_dur'] = merged_df['break_dur'].dt.total_seconds().div(60).astype(int)
# generate list of shifts where employee did not take entitled break, or break taken is less than 30 min
merged_df['break_not_taken'] = 0
merged_df.loc[(merged_df['break_flag'] == 1) & (merged_df['next_shift_flag']) &
(merged_df['break_dur'] < min_break_duration_mins), 'break_not_taken'] = 1
return merged_df.to_dict(orient='list')
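# Illustrative usage (the parameter values below are assumptions, not Rockpool policy):
# breaks = RDA().test_7(shift_duration_hrs=6, min_break_duration_mins=30)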
# Ollie
def test_8(self):
"""Time & attendance: Minimum and maximum engagement periods - Analyse average hours worked on daily, weekly \
and fortnightly basis to identify potential non-compliance."""
df_ta = self.df_ta.__deepcopy__()
# Calculating the length of the shift in minutes
df_ta['shift_len_mins'] = (df_ta['shift_end'] - df_ta['shift_start']).dt. \
total_seconds().div(60).astype(int)
# Creating a dataframe with just unique emp_id
results = pd.DataFrame(df_ta['emp_id'].unique(), columns=['emp_id'])
# 'D' Calculates the sum of the minutes worked for each employee for each day.
# 'W' Calculates the sum of the minutes worked for each employee for each week. The week starts on Monday and\
# runs to Sunday. The given shift_date in the below table is the last day of that week.
# 'SM' Calculates the sum of the minutes worked for each employee for each fortnight. The fortnight starts on \
# the 1st of each month and runs to the 14th of the month. The shift_date given is the start of the fortnight.
freq_list = ['D', 'W', 'SM']
# Looping over the frequencies in the freq_list
for freq in freq_list:
# Getting the mean hours worked for each employee for each frequency
example = df_ta.groupby(['emp_id', pd.Grouper(key='shift_start', freq=freq)])['shift_len_mins'] \
.sum().reset_index().sort_values('shift_start').groupby(['emp_id']).mean() / 60
# Saving the results to the results table
results = results.merge(example, left_on='emp_id', right_on='emp_id')
# renaming the results table
results.columns = ['emp_id', 'daily_ave_hr', 'weekly_ave_hr', 'fortnightly_ave_hr']
# Rounding the results to 2 decimal places
results['daily_ave_hr'] = round(results['daily_ave_hr'], 2)
results['weekly_ave_hr'] = round(results['weekly_ave_hr'], 2)
results['fortnightly_ave_hr'] = round(results['fortnightly_ave_hr'], 2)
return results.to_dict(orient='list')
# Jack
def test_9(self, min_gap):
"""Time & attendance: Gaps between shifts - Analyse gaps between shifts to identify potential non-compliance \
if not paid at the correct rate."""
# difference between tsid_act_end_time and tsid_act_start_time of the next shift
# Looking for employee needing to have certain length of break between shifts
# Parameter: Minimum amount of time off between shifts required (for example 10)
# output - Employees who breach this / all occurrences TEST
df_ta = self.df_ta.__deepcopy__()
# Sort by emp_id, shift date, shift start time
df_ta = df_ta.sort_values(by=['emp_id', 'shift_start'], ascending=True)
# Creating the shift_date column for anlysis
df_ta['shift_date'] = df_ta['shift_start'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))
# Get shift start and end time for each employee on each day --- CHECK IF THIS IS REDUNDANT?
shifts = df_ta.groupby(['emp_id', 'shift_date']).agg({'shift_start': 'min', 'shift_end': 'max'})
shifts.columns = ['min_shift_start', 'max_shift_end']
shifts = shifts.reset_index()
# Append row-shifted columns 'emp_id', 'shift_date' and 'min_shift_start_time' to calculate break between shifts
shifts['next_emp_id'] = shifts['emp_id'].shift(periods=-1).fillna('') # collect next row's emp_id
shifts['next_shift_date'] = shifts['shift_date'].shift(periods=-1).fillna(
pd.to_datetime('1900-01-01 00:00:00')) # collect next rows' shift date
shifts['next_shift_start'] = shifts['min_shift_start'].shift(periods=-1).fillna(
pd.to_datetime('1900-01-01 00:00:00')) # collect next row's start time
shifts['next_shift_start'] = pd.to_datetime(shifts['next_shift_start'])
shifts['max_shift_end'] = pd.to_datetime(shifts['max_shift_end'])
# Calculate timedelta
shifts.loc[shifts['emp_id'] == shifts['next_emp_id'], 'shift_gap'] = shifts['next_shift_start'] - shifts[
'max_shift_end']
shifts.loc[shifts['shift_gap'].isnull(), 'shift_gap'] = pd.to_timedelta(0) # replace null with 0 timedelta
# Convert timedelta to float hours
shifts['shift_gap'] = shifts['shift_gap'].dt.total_seconds().div(3600)
# Flag if employee was entitled to a break (shift gap < 10 hours)
shifts['min_gap_not_taken'] = 0
shifts.loc[(shifts['shift_gap'] > 0.0) & (shifts['shift_gap'] < min_gap), 'min_gap_not_taken'] = 1
return shifts.to_dict(orient='list')
# Philip
def test_10(self):
"""Time & attendance: Consecutive shifts - Analyse number of consecutive shifts worked to identify potential \
non-compliance if not paid at the correct rate."""
df_ta = self.df_ta.__deepcopy__()
# Creating the shift_date column for analysis
df_ta['shift_date'] = df_ta['shift_start'].dt.date.apply(lambda x: pd.to_datetime(x))
import pandas as pd
from pandas_datareader.base import _BaseReader
from pandas_datareader.exceptions import DEP_ERROR_MSG, ImmediateDeprecationError
class RobinhoodQuoteReader(_BaseReader):
"""
Read quotes from Robinhood
DEPRECATED 1/2019 - Robinhood ended support for the endpoints used by this
reader
Parameters
----------
symbols : {str, List[str]}
String symbol of like of symbols
start : None
Quotes are near real-time and so this value is ignored
end : None
Quotes are near real-time and so this value is ignored
retry_count : int, default 3
Number of times to retry query request.
pause : float, default 0.1
Time, in seconds, of the pause between retries.
session : Session, default None
requests.sessions.Session instance to be used
freq : None
Quotes are near real-time and so this value is ignored
"""
_format = "json"
def __init__(
self,
symbols,
start=None,
end=None,
retry_count=3,
pause=0.1,
timeout=30,
session=None,
freq=None,
):
raise ImmediateDeprecationError(DEP_ERROR_MSG.format("Robinhood"))
super(RobinhoodQuoteReader, self).__init__(
symbols, start, end, retry_count, pause, timeout, session, freq
)
if isinstance(self.symbols, str):
self.symbols = [self.symbols]
self._max_symbols = 1630
self._validate_symbols()
self._json_results = []
def _validate_symbols(self):
if len(self.symbols) > self._max_symbols:
raise ValueError(
"A maximum of {0} symbols are supported "
"in a single call.".format(self._max_symbols)
)
def _get_crumb(self, *args):
pass
@property
def url(self):
"""API URL"""
return "https://api.robinhood.com/quotes/"
@property
def params(self):
"""Parameters to use in API calls"""
symbols = ",".join(self.symbols)
return {"symbols": symbols}
def _process_json(self):
res = pd.DataFrame(self._json_results)
return res.set_index("symbol").T
def _read_lines(self, out):
if "next" in out:
self._json_results.extend(out["results"])
return self._read_one_data(out["next"])
self._json_results.extend(out["results"])
return self._process_json()
class RobinhoodHistoricalReader(RobinhoodQuoteReader):
"""
Read historical values from Robinhood
DEPRECATED 1/2019 - Robinhood ended support for the endpoints used by this
reader
Parameters
----------
symbols : {str, List[str]}
String symbol of like of symbols
start : None
Ignored. See span and interval.
end : None
Ignored. See span and interval.
retry_count : int, default 3
Number of times to retry query request.
pause : float, default 0.1
Time, in seconds, of the pause between retries.
session : Session, default None
requests.sessions.Session instance to be used
freq : None
Quotes are near real-time and so this value is ignored
interval : {'day' ,'week', '5minute', '10minute'}
Interval between historical prices
span : {'day', 'week', 'year', '5year'}
Time span relative to now to retrieve. The available spans are a
function of interval. See notes
Notes
-----
Only provides up to 1 year of daily data.
The available spans are a function of interval.
* day: year
* week: 5year
* 5minute: day, week
* 10minute: day, week
"""
_format = "json"
def __init__(
self,
symbols,
start=None,
end=None,
retry_count=3,
pause=0.1,
timeout=30,
session=None,
freq=None,
interval="day",
span="year",
):
raise ImmediateDeprecationError(DEP_ERROR_MSG.format("Robinhood"))
super(RobinhoodHistoricalReader, self).__init__(
symbols, start, end, retry_count, pause, timeout, session, freq
)
interval_span = {
"day": ["year"],
"week": ["5year"],
"10minute": ["day", "week"],
"5minute": ["day", "week"],
}
if interval not in interval_span:
raise ValueError(
"Interval must be one of " "{0}".format(", ".join(interval_span.keys()))
)
valid_spans = interval_span[interval]
if span not in valid_spans:
raise ValueError(
"For interval {0}, span must "
"be in: {1}".format(interval, valid_spans)
)
self.interval = interval
self.span = span
self._max_symbols = 75
self._validate_symbols()
self._json_results = []
@property
def url(self):
"""API URL"""
return "https://api.robinhood.com/quotes/historicals/"
@property
def params(self):
"""Parameters to use in API calls"""
symbols = ",".join(self.symbols)
pars = {"symbols": symbols, "interval": self.interval, "span": self.span}
return pars
def _process_json(self):
df = []
for sym in self._json_results:
vals = pd.DataFrame(sym["historicals"])
vals["begins_at"] = | pd.to_datetime(vals["begins_at"]) | pandas.to_datetime |
"""
Same basic parameters for the Baselining work.
@author: <NAME>, <NAME>
@date Aug 30, 2016
"""
import numpy as np
import pandas as pd
import os
import errno
from itertools import chain, combinations
from scipy.signal import cont2discrete
from datetime import datetime
from pytz import timezone
from pandas.tseries.holiday import USFederalHolidayCalendar
E19 = ['E19TOU_secondary', 'E19TOU_primary', 'E19TOU_transmission']
# define social cost of carbon
# if cost per metric ton is $40:
carbon_costs = {2012: 16.60, 2013: 11.62, 2014: 11.62}
# # if cost per metric ton is $38:
# carbon_costs = {'2012': 15.77, '2013': 10.79, '2014': 10.79}
def create_folder(filename):
"""
Helper function for safely creating all sub-directories
for a specific file in python2
"""
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def matrices_frauke(ts=15):
"""
Return matrices A, B, and E of the discrete-time dynamical system model
of Frauke's builing model with sampling time ts minutes.
"""
# define matrices for Frauke's Building model
# note: both input variables are assumed to be non-negative!
c1, c2, c3 = 9.356e5, 2.97e6, 6.695e5
k1, k2, k3, k4, k5 = 16.48, 108.5, 5.0, 30.5, 23.04
Act = np.array([[-(k1+k2+k3+k5)/c1, (k1+k2)/c1, k5/c1],
[(k1+k2)/c2, -(k1+k2)/c2, 0],
[k5/c3, 0, -(k4+k5)/c3]])
Bct = np.array([[1/c1, -1/c1],
[0, -0],
[0, -0]])
Ect = np.array([[k3/c1, 1/c1, 1/c1],
[0, 1/c2, 0],
[k4/c3, 0, 0]])
Cct = np.array([[0, 0, 0]])
Dct = np.array([[0, 0]])
# convert cont time matrices to discrete time using zoh
(A, B, C, D, dt) = cont2discrete((Act, Bct, Cct, Dct), ts*60, method='zoh')
(A, E, C, D, dt) = cont2discrete((Act, Ect, Cct, Dct), ts*60, method='zoh')
return A, B, E
def matrices_pavlak(ts=15):
"""
Return matrices A, B, and E of the discrete-time dynamical system model
of Pavlak's builing model with sampling time ts minutes.
"""
# define matrices for Pavlak's Building model
# note: both input variables are assumed to be non-negative!
# R's in K/kW and C's in kJ/K
R1, R2 = 0.519956063919649, 0.00195669419889427
R3, Rw = 0.00544245602566602, 0.156536600054259
Cz, Cm = 215552.466637916, 8533970.42635405
den = R2*R3 + Rw*R2 + Rw*R3
Act = np.array([[Rw*R2/(R3*Cz*den) - 1/(R3*Cz), Rw/(Cz*den)],
[Rw/(Cm*den), -1/(R1*Cm)-1/(R2*Cm)+Rw*R3/(R2*Cm*den)]])
Bct = np.array([[1/Cz, -1/Cz],
[0, -0]])
Ect = np.array([[R2/(Cz*den), Rw*R2/(Cz*den), 0.43*Rw*R2/(Cz*den)+0.57/Cz],
[R3/(Cm*den)+1/(R1*Cm), Rw*R3/(Cm*den), 0.43*Rw*R3/(Cm*den)]])
Cct = np.array([[0, 0, 0]])
Dct = np.array([[0, 0]])
# convert cont time matrices to discrete time using zoh
(A, B, C, D, dt) = cont2discrete((Act, Bct, Cct, Dct), ts*60, method='zoh')
(A, E, C, D, dt) = cont2discrete((Act, Ect, Cct, Dct), ts*60, method='zoh')
return A, B, E
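# Illustrative usage: simulate one 15-minute step of the discrete-time model.
# The numeric values of x, u and w below are placeholders (assumptions), not
# calibrated inputs.
# A, B, E = matrices_frauke(ts=15)
# x = np.zeros(3)                 # state: temperatures
# u = np.array([1.0, 0.0])        # heating / cooling inputs
# w = np.array([20.0, 0.5, 0.2])  # disturbances (outside temp, gains)
# x_next = A.dot(x) + B.dot(u) + E.dot(w)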
def powerset(iterable):
"""
Auxiliary function for computing the power set of an iterable:
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
Does not create the set explicitly.
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def extract_PGE_loadshape(filename, start_date=None, end_date=None, name=None):
"""
Reads in tab-separated files (falsely labeled .xls by PG&E) from the
PG&E website: http://www.pge.com/tariffs/energy_use_prices.shtml
Parses the dates and replaces all missing values (b/c of DST) by NaN
"""
loadshapes = pd.read_csv(filename, sep='\t', parse_dates=['Date'],
na_values=['.'], index_col='Date')
if start_date is not None:
loadshapes = loadshapes[loadshapes.index >= start_date]
if end_date is not None:
loadshapes = loadshapes[loadshapes.index <= end_date]
# get DST transition times in US/Pacific timezone
ttimes = pd.DatetimeIndex(timezone('US/Pacific')._utc_transition_times[1:],
tz=timezone('UTC')).tz_convert('US/Pacific')
ttimes = ttimes[(ttimes >= loadshapes.index[0]) &
(ttimes <= loadshapes.index[-1])]
# fix inconsistencies b/c of DST changes
dst_switch_days = loadshapes.index.isin(ttimes.date)
non_switch_data = loadshapes[np.logical_not(dst_switch_days)].drop(
'Profile', axis=1)
switch_data = loadshapes[dst_switch_days].drop('Profile', axis=1)
idx = pd.DatetimeIndex(
start=loadshapes.index[0],
end=loadshapes.index[-1] + pd.Timedelta(hours=23),
freq='H', tz=timezone('US/Pacific'))
isswitch = pd.DatetimeIndex(idx.date).isin(ttimes.date)
nsidx = idx[np.logical_not(isswitch)]
loadSeries = pd.Series(
non_switch_data.values.astype(np.float64).flatten(), nsidx)
dst_series = []
for day in switch_data.index:
vals = switch_data.loc[day].values
tsidx = pd.DatetimeIndex(start=day, end=day+pd.Timedelta(hours=23),
freq='1H', tz=timezone('US/Pacific'))
if (day.month > 0) & (day.month < 5):
daydata = np.concatenate([vals[:2], vals[3:]])
else:
daydata = np.insert(vals, 1, vals[1])
dst_series.append(pd.Series(daydata, tsidx))
loadSeries = pd.concat([loadSeries] + dst_series).sort_index()
if name is not None:
loadSeries.name = name
return loadSeries
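# Illustrative usage (the file name is hypothetical):
# shape = extract_PGE_loadshape('data/dynamic_loadshapes.xls',
#                               start_date='2013-01-01', end_date='2013-12-31',
#                               name='E19_loadshape')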
def daily_occurrences(index, tz='US/Pacific'):
"""
Takes in a pandas DateTimeIndex and returns a pandas Series
indexed by the days in index with the number of occurances of
timestamps on that day as the values.
"""
locidx = index.tz_convert(tz)
occ = pd.DataFrame({'occurences': 1}, index=locidx).groupby(
locidx.date).count()['occurences']
occ.index = pd.DatetimeIndex(occ.index)
return occ
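# Illustrative usage:
# idx = pd.date_range('2013-01-01', periods=96, freq='15Min', tz='US/Pacific')
# daily_occurrences(idx)  # -> Series with a single entry: 96 occurrences on 2013-01-01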
def _parse_nbt_data():
"""
Prepares data for CAISO net benefits test.
"""
# define NERC holidays for use in CAISO net benefits test
NERC_holidays = ([datetime(year, 1, 1) for year in [2012, 2013, 2014]] +
[datetime(2012, 5, 28), datetime(2013, 5, 27),
datetime(2014, 5, 26)] +
[datetime(year, 7, 4) for year in [2012, 2013, 2014]] +
[datetime(2012, 9, 3), datetime(2013, 9, 2),
datetime(2014, 9, 1)] +
[datetime(2012, 11, 22), datetime(2013, 11, 28),
datetime(2014, 11, 27)] +
[datetime(year, 12, 25) for year in [2012, 2013, 2014]])
holiday_idx = [
pd.date_range(
start=day, end=day+pd.DateOffset(days=1) - pd.Timedelta(minutes=15),
tz='US/Pacific', freq='15Min')
for day in NERC_holidays
]
NERC_hd_ts = pd.DatetimeIndex.union_many(holiday_idx[0], holiday_idx[1:])
NERC_hd_ts = NERC_hd_ts.sort_values().tz_convert('GMT')
# load the values of the CAISO nbt from data file
nbl_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data', 'CAISO_NBT.csv')
nbt = pd.read_csv(nbl_file, parse_dates=['Month'], index_col='Month')
# convert into a single dataframe with all timestamps present
dfs = []
for start, end in zip(nbt.index,
nbt.index.shift(1, pd.DateOffset(months=1))):
tsstart = start.tz_localize('US/Pacific').tz_convert('GMT')
tsend = (end.tz_localize('US/Pacific').tz_convert('GMT') -
pd.Timedelta(minutes=15))
dfs.append(pd.DataFrame(
{
'OnPeak': nbt.loc[start]['OnPeak'],
'OffPeak': nbt.loc[start]['OffPeak']
},
index=pd.date_range(start=tsstart, end=tsend, freq='15Min')))