| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
|---|---|---|
import pathlib
from pathlib import Path
from typing import Union, Tuple
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
class QuestionnaireAnalysis:
"""
Reads and analyzes data generated by the questionnaire experiment.
Should be able to accept strings and pathlib.Path objects.
"""
def __init__(self, data_fname: Union[pathlib.Path, str]):
"""Initiate QuestionnaireAnalysis class with following arguments.
Arguments:
data_fname {Union[pathlib.Path, str]} -- [path to .json file containing subjects' data]
"""
self.data_fname = Path(data_fname)
if not self.data_fname.is_file():
raise ValueError
def read_data(self):
"""Reads the json data located in self.data_fname into memory, to
the attribute self.data.
"""
self.data = pd.read_json(self.data_fname)
def show_age_distrib(self) -> Tuple[np.ndarray, np.ndarray]:
"""Calculates and plots the age distribution of the participants.
Returns
-------
hist : np.ndarray
Number of people in a given bin
bins : np.ndarray
Bin edges
"""
self.read_data()
bin_edges = np.arange(0, 110, 10)
ax = self.data.hist(column="age", bins=bin_edges)
hist, bins = np.histogram(self.data.age.dropna().values, bins=bin_edges)
plt.show()
return (hist, bins)
def check_email(self, email: str):
# regex = "^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w)+$"
regex = r"[^@]+@[^@]+\.[^@]+"
flag = False
if re.search(regex, email):
flag = True
return flag
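    # Hedged note (not part of the original): the pattern above accepts any
    # "local@domain.tld" shape, e.g. check_email("a@b.co") -> True, while
    # check_email("a@b") -> False because no dot follows the "@".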
def remove_rows_without_mail(self) -> pd.DataFrame:
"""Checks self.data for rows with invalid emails, and removes them.
Returns
-------
df : pd.DataFrame
A corrected DataFrame, i.e. the same table but with the erroneous rows removed and
the (ordinal) index after a reset.
"""
self.read_data()
df = self.data.copy()
for i in df.index:
if not self.check_email(df.email[i]):
df.drop(i, inplace=True)
df = df.reset_index(drop=True)
return df
def fill_na_with_mean(self) -> Tuple[pd.DataFrame, np.ndarray]:
"""Finds, in the original DataFrame, the subjects that didn't answer
all questions, and replaces that missing value with the mean of the
other grades for that student.
Returns
-------
df : pd.DataFrame
The corrected DataFrame after insertion of the mean grade
arr : np.ndarray
Row indices of the students whose new grades were generated
"""
df = self.data.copy()
scores_df = df.loc[:, "q1":"q5"]
arr = scores_df.index[scores_df.isnull().any(1)]
scores_df = scores_df.T.fillna(scores_df.mean(axis=1)).T
df.loc[:, "q1":"q5"] = scores_df
return df, arr
def score_subjects(self, maximal_nans_per_sub: int = 1) -> pd.DataFrame:
"""Calculates the average score of a subject and adds a new "score" column
with it.
If the subject has more than "maximal_nans_per_sub" NaN in his grades, the
score should be NA. Otherwise, the score is simply the mean of the other grades.
The datatype of score is UInt8, and the floating point raw numbers should be
rounded down.
Parameters
----------
maximal_nans_per_sub : int, optional
Number of allowed NaNs per subject before giving a NA score.
Returns
-------
pd.DataFrame
A new DF with a new column - "score".
"""
self.read_data()
df = self.data.copy()
scores_df = df.loc[:, "q1":"q5"]
scores_df["score"] = scores_df.mean(axis=1).apply(np.floor)
scores_df[scores_df.isnull().sum(axis=1) > maximal_nans_per_sub] = np.nan
scores = pd.Series(scores_df.score.values, dtype="UInt8")  # api: pandas.Series
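# --- Hedged usage sketch (not part of the original file). The file name
# "data.json" is an assumption; the methods and their order come from the
# class above.
if __name__ == "__main__":
    qa = QuestionnaireAnalysis("data.json")
    hist, bins = qa.show_age_distrib()
    clean_df = qa.remove_rows_without_mail()
    filled_df, filled_rows = qa.fill_na_with_mean()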
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 18:04:32 2020
@author: mofarrag
"""
import os
import pandas as pd
import numpy as np
import datetime as dt
from scipy.stats import gumbel_r
import Hapi.Raster as Raster
import matplotlib.pyplot as plt
import zipfile
class River():
# class attributes
def __init__(self, name, days = 36890, start = "1950-1-1",
leftOvertopping_Suffix = "_left.txt",
RightOvertopping_Suffix = "_right.txt", DepthPrefix = "DepthMax",
DurationPrefix = "Duration", ReturnPeriodPrefix = "ReturnPeriod" ):
self.name = name
self.start = dt.datetime.strptime(start,"%Y-%m-%d")
self.end = self.start + dt.timedelta(days = days)
self.leftOvertopping_Suffix = leftOvertopping_Suffix
self.RightOvertopping_Suffix = RightOvertopping_Suffix
self.OneDResultPath = ''
self.TwoDResultPath = ''
self.DepthPrefix = DepthPrefix
self.DurationPrefix = DurationPrefix
self.ReturnPeriodPrefix = ReturnPeriodPrefix
Ref_ind = pd.date_range(self.start,self.end, freq='D')  # api: pandas.date_range
#!/usr/bin/env python
"""
The script converts the .dat files from afphot to .nc files for M2 pipeline.
Before running this script, afphot should be ran (usually in muscat-abc)
and its results copied to /ut2/muscat/reduction/muscat/DATE.
To convert .dat to .nc, this script does the following.
1. read the .dat files in /ut2/muscat/reduction/muscat/DATE/TARGET_N/PHOTDIR/radXX.0
where
DATE: observation date (e.g. 191029)
TARGET_N: e.g. TOI516_0, TOI516_1, TOI516_2 for g-,r-,z-band produced by afphot
PHOTDIR: either apphot_mapping or apphot_centroid
radXX.0: radius containing .dat files
2. convert JD to BJD_TDB, although M2 pipeline uses MJD_TDB
3. construct xarrays assuming:
fwhm as proxy to object entropy (eobj)
sky as proxy to sky median (msky)
peak as proxy to sky entropy (esky)
4. save xarrays dataset into .nc files for each band
"""
import os
import re
from glob import glob
import pandas as pd
from astropy.time import Time
from tqdm import tqdm
import numpy as np
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import EarthLocation
import xarray as xa
import matplotlib.pyplot as pl
from astroplan.plots import plot_finder_image
from astropy.visualization import ZScaleInterval
interval = ZScaleInterval(contrast=0.5)
from muscat2ph.phdata import PhotometryData
# import sys
# sys.path.append('/home/muscat/muscat2/')
# from toi_functions import get_toi
#http://www.oao.nao.ac.jp/en/telescope/abouttel188/
oao = EarthLocation.from_geodetic(lat='34:34:37.47', lon='133:35:38.24', height=372*u.m)
muscat_fov = 6.8 #arcmin in diagonal
fov_rad = muscat_fov*u.arcmin
interval = ZScaleInterval()
def binned(a, binsize, fun=np.mean):
a_b = []
for i in range(0, a.shape[0], binsize):
a_b.append(fun(a[i:i+binsize], axis=0))
return a_b
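# --- Hedged sketch (not part of the original module) of the JD -> BJD_TDB step
# described in the module docstring. It only uses names already imported above
# (Time, the `oao` site); the pipeline's own _convert_to_bjd_tdb is not shown here.
def _jd_to_bjd_tdb_sketch(jd_utc, target_coord, location=oao):
    """Convert geocentric JD (UTC) values to BJD_TDB for a given SkyCoord target."""
    t = Time(jd_utc, format='jd', scale='utc', location=location)
    ltt = t.light_travel_time(target_coord, kind='barycentric')
    return (t.tdb + ltt).jd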
class DatReader:
def __init__(self, obsdate, objname, objcoord, bands=['g','r','z_s'], nstars=None,
ref_frame=0, ref_band='r',
photdir='apphot_mapping', datadir= '/ut2/muscat/reduction/muscat',
verbose=True, overwrite=False):
"""initialize
"""
if 'z' in bands:
raise ValueError('use z_s instead of z')
self.obsdate = obsdate
self.objname = objname
self.bands = bands
self.ref_band = ref_band
self.ref_frame = ref_frame
self.nstars = nstars
self.photdir = photdir
self.datadir = datadir
self.objcoord = self._get_obj_coord(objcoord)
self.paths = self._get_paths()
self.airmasses = None
self.exptimes = None
self.data = self._load_dat_files()
self.radii = {band: sorted(self.data[band].keys()) for band in self.bands}
self.jds = {band: sorted(self.data[band][self.radii[band][0]].keys()) for band in self.bands}
self.mjds = None
self.bjds = None #tdb
self.use_barycorrpy = False
self._convert_to_bjd_tdb() #populate mjds and bjds attributes
self.verbose = verbose
self.overwrite = overwrite
def _get_obj_coord(self, objcoord):
"""Define coord used in bjd_tdb conversion
"""
objcoord = SkyCoord(ra=objcoord[0], dec=objcoord[1], unit='deg')
return objcoord
def _get_paths(self):
"""get path to each data directory per band
"""
paths = {}
nradii = {}
loc = f'{self.datadir}/{self.obsdate}'
if not os.path.exists(loc):
raise FileNotFoundError(f'afphot files not found in {loc}')
for n,band in enumerate(self.bands):
path = f'{loc}/{self.objname}_{n}/{self.photdir}'
radius_dirs = glob(path+'/rad*')
errmsg = f'{path} is empty'
assert len(radius_dirs)>0, errmsg
paths[band] = radius_dirs
nradii[band] = (len(radius_dirs))
errmsg = f'nradii: {nradii} have unequal number of radius directories'
assert len(set(nradii.values()))==1, errmsg
return paths
def _load_dat_files(self):
"""get data per band per aperture radius per cadence;
aperture radius is parsed from the directory produced by afphot
Note: aperture radius in afphot is chosen arbitrarily,
whereas M2 pipeline uses 9 radii: (4,8,12,16,20,25,30,40,50) pix
TODO: when a .dat file is corrupted, it is better to populate
the entry with a dataframe of null/NaN values; currently it is
simpler to omit/skip using the entire radius directory
"""
data = {}
exptimes = {}
airmasses = {}
for band in tqdm(self.bands, desc='reading .dat files'):
radius_dirs = self.paths[band]
apertures = {}
for radius_dir in radius_dirs:
#parse radius from directory name
radius = float(radius_dir.split('/')[-1][3:])
#get dat files inside aperture radius directory
dat_files = glob(radius_dir+'/*')
dat_files.sort()
#specify column names based on those written in the .dat file
column_names = 'ID xcen ycen nflux flux err sky sky_sdev SNR nbadpix fwhm peak'.split()
cadences = {}
exptime = []
airmass = []
nrows, ncols = [], []
for i,dat_file in enumerate(dat_files):
try:
#parse lines 0, 18, 20, which contain gjd, exptime, and airmass
d = pd.read_csv(dat_file, header=None)
time = float(d.iloc[0].str.split('=').values[0][1]) #gjd - 2450000
time+=2450000
exptime.append(float(d.iloc[18].str.split('=').values[0][1]))
airmass.append(float(d.iloc[20].str.split('=').values[0][1]))
except Exception as e:
#some afphot dat files may be corrupted
errmsg = f'{dat_file} seems corrupted.\n\n'
errmsg+='You can temporarily delete the radius directory in each band:\n'
for n,_ in enumerate(self.bands):
p = f'{self.datadir}/{self.obsdate}/{self.objname}_{n}/{self.photdir}/rad{radius}\n'
errmsg+=f'$ rm -rf {p}'
raise IOError(errmsg)
# parse succeeding lines as dataframe
d = pd.read_csv(dat_file, delim_whitespace=True, comment='#', names=column_names)  # api: pandas.read_csv
from __future__ import print_function, unicode_literals
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import sys
import os
if not sys.warnoptions:
warnings.simplefilter("ignore")
import click
from tabulate import tabulate
import emoji
from pyfiglet import Figlet
import gensim
import spacy
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
import pandas as pd
from newspaper import Article
tabulate.PRESERVE_WHITESPACE = True
from PyInquirer import style_from_dict, Token, prompt, Separator
from pprint import pprint
nlp = spacy.load('en')
style = style_from_dict({
Token.Separator: '#cc5454',
Token.QuestionMark: '#673ab7 bold',
Token.Selected: '#cc5454', # default
Token.Pointer: '#673ab7 bold',
Token.Instruction: '', # default
Token.Answer: '#f44336 bold',
Token.Question: '',
})
nlp = spacy.load('en', parser=False)
model = gensim.models.Word2Vec.load('sentence_doc2vec_model.doc2vec')
class Document(object):
def __init__(self, home=None, debug=False):
self.home = os.path.abspath(home or '.')
self.debug = debug
@click.group()
@click.option('--document')
@click.pass_context
def cli(ctx, document):
ctx.obj = Document(document)
pass_document = click.make_pass_decorator(Document)
def read_corpus(documents):
for i, plot in enumerate(documents):
yield gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(plot, max_len=30), [i])
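# Hedged illustration (not part of the original): read_corpus lowercases and
# tokenizes each sentence and tags it with its position, e.g.
# list(read_corpus(["Hello world."]))[0] == TaggedDocument(words=['hello', 'world'], tags=[0])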
click.echo("")
click.echo("")
click.echo("")
click.echo("")
click.echo("")
click.echo("")
click.echo("")
f = Figlet(font='slant')
click.echo(f.renderText('WIRED! CLI'))
click.echo('Wired! CLI Is A Command Line Interface for Image Analysis. This CLI allows users to develop image categorization and object detection interfaces from a folder of images. It was developed through the Wired! Lab at Duke University {}'.format(emoji.emojize(':zap:', use_aliases=True)))
click.echo("")
click.echo("")
click.echo("")
click.echo("")
click.echo("")
click.echo("")
click.echo("")
@cli.command()
@click.option('--document', prompt='What article would you like to use?', help='ex. document_1.txt -- the document that you would like to query')
def train(document):
"""
Train a new model
"""
article = Article(document)
article.download()
article.parse()
doc = nlp(article.text)
sentences = [sent.string.strip() for sent in doc.sents]
articledf = pd.DataFrame({"sentence": sentences})
articledf['name'] = 'article1'
train_corpus = list(read_corpus(articledf.sentence))
model = gensim.models.doc2vec.Doc2Vec(size=50, min_count=2, iter=55)
model.build_vocab(train_corpus)
click.echo('....Training Model {}'.format(emoji.emojize(':muscle:', use_aliases=True)))
model.train(train_corpus, total_examples=model.corpus_count, epochs=model.iter)
click.echo('....Saving Model {}'.format(emoji.emojize(':pencil:', use_aliases=True)))
model.save('sentence_doc2vec_model.doc2vec')
click.echo('....Performing Inference {}'.format(emoji.emojize(':boom:', use_aliases=True)))
@cli.command()
@click.option('--document', prompt='Input Document', help='ex. document_1.txt -- the document that you would like to query')
@click.option('--input_sentence', prompt='Quote', help='ex. document_1.txt -- the document that you would like to query')
def quote(document, input_sentence):
"""
Find a quote in the document
"""
article = Article(document)
article.download()
article.parse()
doc = nlp(article.text)
sentences = [sent.string.strip() for sent in doc.sents]
articledf = pd.DataFrame({"sentence": sentences})
articledf['name'] = 'article1'
click.echo('....Performing Inference {}'.format(emoji.emojize(':boom:', use_aliases=True)))
tokens = gensim.utils.simple_preprocess(input_sentence)
vec = model.infer_vector(tokens)
sim = model.docvecs.most_similar(positive=[vec], topn=1)
quotes = articledf['sentence'][sim[0][0]]
click.echo(quotes)
@cli.command()
@click.option('--document', prompt='Input Document', help='ex. document_1.txt -- the document that you would like to query')
def summary(document):
"""
Get a summary of the document
"""
article = Article(document)
article.download()
article.parse()
article_text = article.text.replace('\n', '').replace('\t', '')
click.echo('....Performing Inference {}'.format(emoji.emojize(':boom:', use_aliases=True)))
summary_text = summarize(str(article_text))
click.echo("")
click.echo("")
click.echo(summary_text)
click.echo("")
click.echo("")
@cli.command()
@click.option('--document', prompt='Input Document', help='ex. document_1.txt -- the document that you would like to query')
def words(document):
"""
List the keywords in the document
"""
article = Article(document)
article.download()
article.parse()
article_text = article.text.replace('\n', '').replace('\t', '')
click.echo('....Performing Inference {}'.format(emoji.emojize(':boom:', use_aliases=True)))
document_keywords = keywords(article_text).split('\n')
keywords_df = pd.DataFrame({"keywords": document_keywords})  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 3 12:24:27 2018
@author: <NAME>
"""
"""
python script to scrape the results from unitedstateszipcodes and save to a file
"""
from bs4 import BeautifulSoup
import os
import pandas as pd
from selenium import webdriver
from fake_useragent import UserAgent
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.proxy import Proxy, ProxyType
import time
co = webdriver.ChromeOptions()
#co.add_argument("headless")
prefs={"profile.managed_default_content_settings.images": 2, 'disk-cache-size': 4096 }
co.add_experimental_option('prefs', prefs)
def get_proxies(co=co):
driver = webdriver.Chrome("chromedriver.exe", options=co)
driver.get("https://www.us-proxy.org/")
PROXIES = []
proxies = driver.find_elements_by_css_selector("tr[role='row']")
for p in proxies:
result = p.text.split(" ")
if result[-1] == "yes":
PROXIES.append(result[0]+":"+result[1])
driver.close()
return PROXIES
ALL_PROXIES = get_proxies()
def proxy_driver(PROXIES, co=co):
prox = Proxy()
ua=UserAgent()
while True:
if PROXIES:
pxy = PROXIES[-1]
break
else:
print("--- Proxies used up (%s)" % len(PROXIES))
PROXIES = get_proxies()
prox.proxy_type = ProxyType.MANUAL
prox.http_proxy = pxy
#prox.socks_proxy = pxy
prox.ssl_proxy = pxy
capabilities = dict(DesiredCapabilities.CHROME)
capabilities["chrome.page.settings.userAgent"] = (ua.random)
prox.add_to_capabilities(capabilities)
service_args=['--ssl-protocol=any','--ignore-ssl-errors=true']
driver = webdriver.Chrome("chromedriver.exe", options=co, desired_capabilities=capabilities,service_args=service_args)
return driver
def scrape_results(soup, zipcode):
corn = BeautifulSoup(soup.page_source, "lxml")
doc = {'zipcode' : zipcode, 'id' : zipcode[:3]}
for tab in corn.findAll('table'):
if "Population Density" in str(tab):
heads = tab.findAll('th')
vals = tab.findAll('td', {'class' : 'text-right'})
for i, head in enumerate(heads):
doc[head.text] = vals[i].text
if "Land Area" in str(tab):
heads = tab.findAll('th')
vals = tab.findAll('td', {'class' : 'text-right'})
for i, head in enumerate(heads):
doc[head.text] = vals[i].text
return doc
dr = proxy_driver(ALL_PROXIES)
dr.delete_all_cookies()
header = [u'Housing Units', 'zipcode', u'Water Area', u'Median Home Value', u'Median Household Income',
u'Population Density', u'Occupied Housing Units', u'Population', 'id', u'Land Area']
if os.path.isfile("E:/Cognitive Computing BIA662/Project/scraped_results.csv"):
data = | pd.read_csv("E:/Cognitive Computing BIA662/Project/scraped_results.csv", na_values=0, dtype={'zipcode':str}) | pandas.read_csv |
# %%
import os
import pandas as pd
import numpy as np
import threading
import time
base_dir = os.getcwd()
# %%
# Initialize the table header
header = ['user', 'n_op', 'n_trans', 'op_type_0', 'op_type_1', 'op_type_2', 'op_type_3', 'op_type_4', 'op_type_5',
'op_type_6', 'op_type_7', 'op_type_8', 'op_type_9', 'op_type_perc', 'op_type_std', 'op_type_n', 'op_mode_0',
'op_mode_1', 'op_mode_2', 'op_mode_3', 'op_mode_4', 'op_mode_5', 'op_mode_6', 'op_mode_7', 'op_mode_8',
'op_mode_9', 'op_mode_perc', 'op_mode_std', 'op_mode_n', 'op_device_perc', 'op_device_std',
'op_device_nan_perc', 'op_device_n', 'op_ip_perc', 'op_ip_std', 'op_ip_nan_perc', 'op_ip_n', 'op_net_type_0',
'op_net_type_1', 'op_net_type_2', 'op_net_type_3', 'op_net_type_perc', 'op_net_type_std',
'op_net_type_nan_perc', 'op_channel_0', 'op_channel_1', 'op_channel_2', 'op_channel_3', 'op_channel_4',
'op_channel_perc', 'op_channel_std', 'op_channel_n', 'op_ip_3_perc', 'op_ip_3_std', 'op_ip_3_nan_perc',
'op_ip_3_n', 'op_ip_3_ch_freq', 'op_ip_48h_n', 'op_device_48h_n',
'op_48h_n', 'trans_platform_0', 'trans_platform_1', 'trans_platform_2', 'trans_platform_3',
'trans_platform_4', 'trans_platform_5', 'trans_platform_perc', 'trans_platform_std', 'trans_platform_n',
'trans_tunnel_in_0', 'trans_tunnel_in_1', 'trans_tunnel_in_2', 'trans_tunnel_in_3', 'trans_tunnel_in_4',
'trans_tunnel_in_5', 'trans_tunnel_in_perc', 'trans_tunnel_in_std', 'trans_tunnel_in_n',
'trans_tunnel_in_nan_perc', 'trans_tunnel_out_0', 'trans_tunnel_out_1', 'trans_tunnel_out_2',
'trans_tunnel_out_3', 'trans_tunnel_out_perc', 'trans_tunnel_out_std', 'trans_tunnel_n', 'trans_amount_max',
'trans_amount_avg', 'trans_amount_std', 'trans_type1_0', 'trans_type1_1', 'trans_type1_2', 'trans_type1_3',
'trans_type1_4', 'trans_type1_perc', 'trans_type1_std', 'trans_ip_perc', 'trans_ip_std', 'trans_ip_nan_perc',
'trans_ip_n', 'trans_type2_0', 'trans_type2_1', 'trans_type2_2', 'trans_type2_3', 'trans_type2_4',
'trans_type2_perc', 'trans_type2_std', 'trans_ip_3_perc', 'trans_ip_3_std', 'trans_ip_3_nan_perc',
'trans_ip_3_n', 'trans_ip_3_ch_freq',
'trans_amount_48h_n', 'trans_48h_n', 'trans_platform_48h_n', 'trans_ip_48h_n']
print(len(header))
# %%
feature_train = pd.DataFrame(columns=header)
feature_test_a = pd.DataFrame(columns=header)
feature_test_b = pd.DataFrame(columns=header)
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
train_op_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_op.csv')
train_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_trans.csv')
test_a_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_base.csv')
test_a_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_op.csv')
test_a_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_trans.csv')
test_b_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_base.csv')
test_b_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_op.csv')
test_b_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_trans.csv')
n_train = len(train_base_df)
n_test_a = len(test_a_base_df)
n_test_b = len(test_b_base_df)
# %%
# load encoder
op_type = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_type.csv')
mp_op_type = {}
for col in op_type.columns.values:
mp_op_type[col] = op_type[col].values
op_mode = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_mode.csv')
mp_op_mode = {}
for col in op_mode.columns.values:
mp_op_mode[col] = op_mode[col].values
net_type = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_net_type.csv')
mp_net_type = {}
for col in net_type.columns.values:
mp_net_type[col] = net_type[col].values
channel = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_channel.csv')
mp_channel = {}
for col in channel.columns.values:
mp_channel[col] = channel[col].values
platform = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_platform.csv')  # api: pandas.read_csv
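# --- Hedged refactoring sketch (not part of the original): the encoder-loading
# pattern above repeats for every encoder CSV, so it could be factored into a
# helper. Nothing beyond the files already read above is assumed.
def load_encoder(path):
    """Read an encoder CSV and return a {column_name: encoded_values} dict."""
    enc = pd.read_csv(path)
    return {col: enc[col].values for col in enc.columns.values}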
from datetime import datetime
import backtrader as bt
from backtrader import cerebro
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse
from rest_framework import permissions, viewsets
from rest_framework.response import Response
from rest_framework.views import APIView
from .dataSources.gFinance.StockDataFeed import StockDataFeed, database_index
from .models import employee, knowledge
from .serializers import (UserSerializer, employeesSerializer,
knowledgeSerializer)
from .utility.File import File
from ta.trend import SMAIndicator
import pandas as pd
stock_data_feed = StockDataFeed()
file = File()
knowledge_db = file.read(file_name = 'knowledge_db', path = settings.BASE_DIR + '\\app\\resources\\', extension = 'json');
holdings = file.read(path='C:\\Users\\91880\\Downloads', file_name='holdings', extension='csv')
class SMA(APIView):
def get(self, request):
start_time = datetime.now()
data = stock_data_feed.get_data(['ITC'])
itc = data['ITC']
df = pd.DataFrame(data=itc[1:5], columns=itc[0])
df.Close = pd.to_numeric(df.Close, downcast="float")  # api: pandas.to_numeric
import itertools
import numpy as np
import pandas as pd
import pytest
from estimagic.estimation.msm_weighting import assemble_block_diagonal_matrix
from estimagic.estimation.msm_weighting import get_weighting_matrix
from numpy.testing import assert_array_almost_equal as aaae
@pytest.fixture
def expected_values():
values = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 5, 6], [0, 0, 7, 8]])
return values
cov_np = np.diag([1, 2, 3])
cov_pd = pd.DataFrame(cov_np)
test_cases = itertools.product([cov_np, cov_pd], ["diagonal", "optimal"])
@pytest.mark.parametrize("moments_cov, method", test_cases)
def test_get_weighting_matrix(moments_cov, method):
calculated = get_weighting_matrix(moments_cov, method)
if isinstance(moments_cov, pd.DataFrame):
assert calculated.index.equals(moments_cov.index)
assert calculated.columns.equals(moments_cov.columns)
calculated = calculated.to_numpy()
expected = np.diag(1 / np.array([1, 2, 3]))
aaae(calculated, expected)
def test_assemble_block_diagonal_matrix_pd(expected_values):
matrices = [
pd.DataFrame([[1, 2], [3, 4]])  # api: pandas.DataFrame
import pandas as pd
import numpy as np
def btk_data_decoy_old():
df = pd.read_csv('btk_active_decoy/BTK_2810_old.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['target2']<300]
df_active['target2'] = 1
df_ic_decoy = df[df['target2']>9000]
df_ic_decoy['target2'] = 0
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all.to_csv('btk_active_decoy/btk_2810_add_decoy_old.csv',index=None)
def btk_data_cut_decoy():
df = pd.read_csv('btk_active_decoy/BTK_2810_old.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_cut_decoy = pd.read_csv('btk_active_decoy/similarity_active_decoy.csv')
df_cut_decoy = df_cut_decoy.head(1139)  # 1139 = 15069 total decoys - 13930 (1393 positive samples * ratio of 10)
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['target2']<300]
df_active['target2'] = 1
df_ic_decoy = df[df['target2']>9000]
df_ic_decoy['target2'] = 0
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all_filter = df_all[~ df_all['smiles'].isin(df_cut_decoy['train_smiles'])]
df_all_filter.to_csv('btk_active_decoy/btk_2810_cut_decoy.csv',index=None)
def btk_data_decoy():
df = pd.read_csv('btk_active_decoy/BTK_2810.csv')  # api: pandas.read_csv
# Python for Healthcare
## Hospital Spending
### Import Libraries
import pandas as pd
import statsmodels.api as sm
### Import Data
df_cms = pd.read_csv('C:/Users/drewc/GitHub/python-for-healthcare/pynarratives/hospital_spending/_data/cms_mspb_stage.csv')  # api: pandas.read_csv
import numpy as np
import pandas as pd
import random
import pickle
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import torch
from torch.nn.utils.rnn import pad_sequence
from utils import _get_parcel, _get_behavioral
from cc_utils import _get_clip_labels
K_RUNS = 4
K_SEED = 330
def _get_clip_seq(df, subject_list, args):
'''
return:
X: input seq (batch_size x time x feat_size)
y: label seq (batch_size x time)
X_len: len of each seq (batch_size x 1)
batch_size <-> number of sequences
time <-> max length after padding
'''
features = [ii for ii in df.columns if 'feat' in ii]
X = []
y = []
for subject in subject_list:
for i_class in range(args.k_class):
if i_class==0: # split test-retest into 4
seqs = df[(df['Subject']==subject) &
(df['y'] == 0)][features].values
label_seqs = df[(df['Subject']==subject) &
(df['y'] == 0)]['y'].values
k_time = int(seqs.shape[0]/K_RUNS)
for i_run in range(K_RUNS):
seq = seqs[i_run*k_time:(i_run+1)*k_time, :]
label_seq = label_seqs[i_run*k_time:(i_run+1)*k_time]
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
y.append(torch.LongTensor(label_seq))
else:
seq = df[(df['Subject']==subject) &
(df['y'] == i_class)][features].values
label_seq = df[(df['Subject']==subject) &
(df['y'] == i_class)]['y'].values
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
y.append(torch.LongTensor(label_seq))
X_len = torch.LongTensor([len(seq) for seq in X])
# pad sequences
X = pad_sequence(X, batch_first=True, padding_value=0)
y = pad_sequence(y, batch_first=True, padding_value=-100)
return X.to(args.device), X_len.to(args.device), y.to(args.device)
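# --- Hedged illustration (not part of the original module): how the padding in
# _get_clip_seq shapes its outputs. Two toy sequences of lengths 3 and 2 with
# 4 features each pad to X of shape (batch=2, time=3, feat=4); labels pad with
# -100, the default ignore_index of PyTorch's cross-entropy loss.
def _demo_pad_shapes():
    xs = [torch.zeros(3, 4), torch.ones(2, 4)]
    ys = [torch.zeros(3, dtype=torch.long), torch.ones(2, dtype=torch.long)]
    X = pad_sequence(xs, batch_first=True, padding_value=0)
    y = pad_sequence(ys, batch_first=True, padding_value=-100)
    return X.shape, y.shape  # torch.Size([2, 3, 4]), torch.Size([2, 3])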
def _clip_class_df(args):
'''
data for 15-way clip classification
args.roi: number of ROIs
args.net: number of subnetworks (7 or 17)
args.subnet: subnetwork; 'wb' if all subnetworks
args.invert_flag: all-but-one subnetwork
args.r_roi: number of random ROIs to pick
args.r_seed: random seed for picking ROIs
save each timepoint as feature vector
append class label based on clip
return:
pandas df
'''
load_path = (args.input_data + '/data_MOVIE_runs_%s' %(args.roi_name) +
'_%d_net_%d_ts.pkl' %(args.roi, args.net))
with open(load_path, 'rb') as f:
data = pickle.load(f)
# where are the clips within the run?
timing_file = pd.read_csv('data/videoclip_tr_lookup.csv')
'''
main
'''
clip_y = _get_clip_labels()
table = []
for run in range(K_RUNS):
print('loading run %d/%d' %(run+1, K_RUNS))
run_name = 'MOVIE%d' %(run+1) #MOVIEx_7T_yz
# timing file for run
timing_df = timing_file[
timing_file['run'].str.contains(run_name)]
timing_df = timing_df.reset_index(drop=True)
for subject in data:
# get subject data (time x roi x run)
vox_ts = data[subject][:, :, run]
for jj, clip in timing_df.iterrows():
start = int(np.floor(clip['start_tr']))
stop = int(np.ceil(clip['stop_tr']))
clip_length = stop - start
# assign label to clip
y = clip_y[clip['clip_name']]
for t in range(clip_length):
act = vox_ts[t + start, :]
t_data = {}
t_data['Subject'] = subject
t_data['timepoint'] = t
for feat in range(vox_ts.shape[1]):
t_data['feat_%d' %(feat)] = act[feat]
t_data['y'] = y
table.append(t_data)
df = pd.DataFrame(table)
df['Subject'] = df['Subject'].astype(int)
return df
def _clip_class_rest_df(args, run):
'''
data for 15 clip + rest visualization
each run is saved individually
run: 0, 1, 2, 3 (one of the 4 runs)
args.roi: number of ROIs
args.net: number of subnetworks (7 or 17)
args.subnet: subnetwork; 'wb' if all subnetworks
args.invert_flag: all-but-one subnetwork
args.r_roi: number of random ROIs to pick
args.r_seed: random seed for picking ROIs
save each timepoint as feature vector
append class label based on clip
return:
pandas df
'''
# optional arguments
d = vars(args)
if 'invert_flag' not in d:
args.invert_flag = False
if 'r_roi' not in d:
args.r_roi = 0
args.r_seed = 0
load_path = (args.input_data + '/data_MOVIE_runs_' +
'roi_%d_net_%d_ts.pkl' %(args.roi, args.net))
with open(load_path, 'rb') as f:
data = pickle.load(f)
# where are the clips within the run?
timing_file = pd.read_csv('data/videoclip_tr_lookup.csv')
# pick either all ROIs or subnetworks
if args.subnet!='wb':
if 'minus' in args.subnet:
# remove 'minus_' prefix
args.subnet = args.subnet.split('minus_')[1]
_, nw_info = _get_parcel(args.roi, args.net)
# ***roi ts sorted in preprocessing
nw_info = np.sort(nw_info)
idx = (nw_info == args.subnet)
else:
idx = np.ones(args.roi).astype(bool)
# all-but-one subnetwork
if args.subnet and args.invert_flag:
idx = ~idx
# if random selection,
# overwrite everything above
if args.r_roi > 0:
random.seed(args.r_seed)
idx = np.zeros(args.roi).astype(bool)
# random sample without replacement
samp = random.sample(range(args.roi), k=args.r_roi)
idx[samp] = True
'''
main
'''
print('loading run %d' %(run+1))
run_name = 'MOVIE%d' %(run+1) #MOVIEx_7T_yz
timing_df = timing_file[timing_file['run'].str.contains(run_name)]
timing_df = timing_df.reset_index(drop=True)
# get unique id for each segment including rest segments
length = data[list(data.keys())[0]][:, :, run].shape[0]
k_class = len(timing_df)
y_vec = np.ones(length)*len(timing_df)
for jj, clip in timing_df.iterrows():
start = int(np.floor(clip['start_tr']))
if jj==0:
tag = k_class
y_vec[:start] = tag
tag += 1
else:
y_vec[stop:start] = tag
tag += 1
stop = int(np.ceil(clip['stop_tr']))
clip_length = stop - start
y_vec[start:stop] = jj
y_vec[stop:] = tag
table = []
for subject in data:
roi_ts = data[subject][:, idx, run]
for t in range(roi_ts.shape[0]):
act = roi_ts[t, :]
t_data = {}
t_data['Subject'] = subject
t_data['timepoint'] = t
t_data['y'] = y_vec[t]
for feat in range(roi_ts.shape[1]):
t_data['feat_%d' %(feat)] = act[feat]
table.append(t_data)
df = pd.DataFrame(table)
df['Subject'] = df['Subject'].astype(int)
return df
def _get_bhv_seq(df, subject_list, args):
'''
return:
X: input seq (batch_size x time x feat_size)
y: label seq (batch_size x time)
in {0, 1, ..} if args.mode=='class'
in R if args.mode=='reg'
c: clip seq (batch_size x time)
X_len: len of each seq (batch_size x 1)
batch_size <-> number of sequences
time <-> max length after padding
'''
# optional arguments
d = vars(args)
# regression or classification
if 'mode' not in d:
args.mode = 'class'
if args.mode=='class':
label = 'y'
elif args.mode=='reg':
label = args.bhv
# permutation test
if 'shuffle' not in d:
args.shuffle = False
if args.shuffle:
# different shuffle for each iteration
np.random.seed(args.i_seed)
# get scores for all participants without bhv_df
train_label = df[(df['Subject'].isin(subject_list)) &
(df['c']==1) & (df['timepoint']==0)][label].values
np.random.shuffle(train_label) # inplace
k_clip = len(np.unique(df['c']))
features = [ii for ii in df.columns if 'feat' in ii]
X = []
y = []
c = []
for ii, subject in enumerate(subject_list):
for i_clip in range(k_clip):
if i_clip==0: #handle test retest differently
seqs = df[(df['Subject']==subject) &
(df['c'] == 0)][features].values
if args.shuffle:
label_seqs = np.ones(seqs.shape[0])*train_label[ii]
else:
label_seqs = df[(df['Subject']==subject) &
(df['c'] == 0)][label].values
clip_seqs = df[(df['Subject']==subject) &
(df['c'] == 0)]['c'].values
k_time = int(seqs.shape[0]/K_RUNS)
for i_run in range(K_RUNS):
seq = seqs[i_run*k_time:(i_run+1)*k_time, :]
label_seq = label_seqs[i_run*k_time:(i_run+1)*k_time]
clip_seq = clip_seqs[i_run*k_time:(i_run+1)*k_time]
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
if args.mode=='class':
y.append(torch.LongTensor(label_seq))
elif args.mode=='reg':
y.append(torch.FloatTensor(label_seq))
c.append(torch.LongTensor(clip_seq))
else:
seq = df[(df['Subject']==subject) &
(df['c'] == i_clip)][features].values
if args.shuffle:
label_seq = np.ones(seq.shape[0])*train_label[ii]
else:
label_seq = df[(df['Subject']==subject) &
(df['c'] == i_clip)][label].values
clip_seq = df[(df['Subject']==subject) &
(df['c'] == i_clip)]['c'].values
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
if args.mode=='class':
y.append(torch.LongTensor(label_seq))
elif args.mode=='reg':
y.append(torch.FloatTensor(label_seq))
c.append(torch.LongTensor(clip_seq))
X_len = torch.LongTensor([len(seq) for seq in X])
# pad sequences
X = pad_sequence(X, batch_first=True, padding_value=0)
y = pad_sequence(y, batch_first=True, padding_value=-100)
c = pad_sequence(c, batch_first=True, padding_value=-100)
return (X.to(args.device), X_len.to(args.device),
y.to(args.device), c.to(args.device))
def _group_bhv_df(args, subject_list):
'''
based on behavioral score,
group participants into clusters
if k_class==2:
group top cutoff and bot cutoff
if k_class > 2:
use k_means for grouping
return:
if args.mode=='class'
bhv_df: ['Subject', bhv, 'y']
if args.mode=='reg'
bhv_df: ['Subject', bhv, 'y']
*** return 'y' in reg mode
for kfold balancing
'''
# for kfold balancing
if args.mode=='reg':
args.k_class = 2
# get behavioral data for subject_list
bhv_df = _get_behavioral(subject_list)
bhv_df = bhv_df[['Subject', args.bhv]]
'''
***normalize bhv scores
must be explicitly done for pytorch
'''
b = bhv_df[args.bhv].values
bhv_df[args.bhv] = (b - np.min(b))/(np.max(b) - np.min(b))
# reduce subjects by picking top and bottom 'cutoff' percent
_x = np.sort(bhv_df[args.bhv].values)
percentile = int(np.floor(args.cutoff*len(subject_list)))
bot_cut = _x[percentile]
top_cut = _x[-percentile]
bhv_df = bhv_df[(bhv_df[args.bhv] >= top_cut) |
(bhv_df[args.bhv] <= bot_cut)]
'''
behavioral groups: into 'k_class'
'''
if args.k_class > 2:
_x = bhv_df[[args.bhv]].values
model = KMeans(n_clusters=args.k_class,
random_state=K_SEED)
y = model.fit_predict(_x)
# each participant assigned a label
bhv_df['y'] = y
else:
b = bhv_df[args.bhv].values
y = [1 if ii>=top_cut else 0 for ii in b]
bhv_df['y'] = np.array(y)
return bhv_df
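# --- Hedged numeric illustration (not part of the original) of the top/bottom
# cutoff logic in _group_bhv_df: only the extremes of the score distribution
# survive, and the top group is then labelled y=1.
def _demo_cutoff_grouping():
    scores = np.arange(10) / 10.0                       # 0.0, 0.1, ..., 0.9
    _x = np.sort(scores)
    percentile = int(np.floor(0.2 * len(scores)))       # cutoff=0.2 -> 2
    bot_cut, top_cut = _x[percentile], _x[-percentile]  # 0.2 and 0.8
    kept = scores[(scores >= top_cut) | (scores <= bot_cut)]
    return kept                                         # [0.0, 0.1, 0.2, 0.8, 0.9]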
def _bhv_class_df(args):
'''
data for k_class bhv classification
*** used for both classification and regression
args.mode: 'class' or bhv'
args.roi: number of ROIs
args.net: number of subnetworks (7 or 17)
args.subnet: subnetwork; 'wb' if all subnetworks
args.bhv: behavioral measure
args.k_class: number of behavioral groups
args.cutoff: percentile for participant cutoff
args.invert_flag: all-but-one subnetwork
save each timepoint as feature vector
append 'c' based on clip
append 'y' based on behavioral group
'''
# optional arguments
d = vars(args)
if 'invert_flag' not in d:
args.invert_flag = False
if 'mode' not in d:
args.mode = 'class'
load_path = (args.input_data + '/data_MOVIE_runs_' +
'roi_%d_net_%d_ts.pkl' %(args.roi, args.net))
with open(load_path, 'rb') as f:
data = pickle.load(f)
subject_list = np.sort(list(data.keys()))
bhv_df = _group_bhv_df(args, subject_list)
cutoff_list = bhv_df['Subject'].values.astype(str)
# where are the clips within the run?
timing_file = pd.read_csv('data/videoclip_tr_lookup.csv')  # api: pandas.read_csv
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is a part of the CIRN Interaction Language.
from __future__ import print_function, division
import logging
import numpy as np
import pandas as pd
import pyflux as pf
def predict_all_freqs(data, training_lag, steps):
"""Takes a numpy matrix containing duty cycles with time in the first
coordinate and frequency in the second, and calculates another numpy
matrix containing the VAR predictions with steps many new time indices
and the same number of frequencies. We use a single model across
frequencies (this is NOT how spec-val is implemented)."""
columns = [str(i) for i in range(data.shape[1])]
data = pd.DataFrame(data, columns=columns)  # api: pandas.DataFrame
import pandas as pd
from datascope.importance.shapley import ImportanceMethod
from enum import Enum
from pandas import DataFrame
from typing import Any, Optional, Dict
from .base import Scenario, attribute, result
from ..dataset import Dataset, DEFAULT_TRAINSIZE, DEFAULT_VALSIZE, DEFAULT_TESTSIZE
from ..pipelines import Pipeline, ModelType
class RepairMethod(str, Enum):
KNN_Single = "shapley-knn-single"
KNN_Interactive = "shapley-knn-interactive"
TMC_1 = "shapley-tmc-001"
TMC_5 = "shapley-tmc-005"
TMC_10 = "shapley-tmc-010"
TMC_50 = "shapley-tmc-050"
TMC_100 = "shapley-tmc-100"
TMC_500 = "shapley-tmc-500"
TMC_PIPE_1 = "shapley-tmc-pipe-001"
TMC_PIPE_5 = "shapley-tmc-pipe-005"
TMC_PIPE_10 = "shapley-tmc-pipe-010"
TMC_PIPE_50 = "shapley-tmc-pipe-050"
TMC_PIPE_100 = "shapley-tmc-pipe-100"
TMC_PIPE_500 = "shapley-tmc-pipe-500"
RANDOM = "random"
@staticmethod
def is_pipe(method: "RepairMethod") -> bool:
return method in [
RepairMethod.TMC_PIPE_1,
RepairMethod.TMC_PIPE_5,
RepairMethod.TMC_PIPE_10,
RepairMethod.TMC_PIPE_50,
RepairMethod.TMC_PIPE_100,
RepairMethod.TMC_PIPE_500,
]
@staticmethod
def is_tmc(method: "RepairMethod") -> bool:
return method in [
RepairMethod.TMC_1,
RepairMethod.TMC_5,
RepairMethod.TMC_10,
RepairMethod.TMC_50,
RepairMethod.TMC_100,
RepairMethod.TMC_500,
RepairMethod.TMC_PIPE_1,
RepairMethod.TMC_PIPE_5,
RepairMethod.TMC_PIPE_10,
RepairMethod.TMC_PIPE_50,
RepairMethod.TMC_PIPE_100,
RepairMethod.TMC_PIPE_500,
]
@staticmethod
def is_tmc_nonpipe(method: "RepairMethod") -> bool:
return method in [
RepairMethod.TMC_1,
RepairMethod.TMC_5,
RepairMethod.TMC_10,
RepairMethod.TMC_50,
RepairMethod.TMC_100,
RepairMethod.TMC_500,
]
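# Hedged usage note (not part of the original): the static helpers above just
# classify enum members, e.g. RepairMethod.is_pipe(RepairMethod.TMC_PIPE_10) is
# True while RepairMethod.is_tmc_nonpipe(RepairMethod.TMC_PIPE_10) is False.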
class UtilityType(str, Enum):
ACCURACY = "acc"
EQODDS = "eqodds"
EQODDS_AND_ACCURACY = "eqodds-acc"
IMPORTANCE_METHODS = {
RepairMethod.KNN_Single: ImportanceMethod.NEIGHBOR,
RepairMethod.KNN_Interactive: ImportanceMethod.NEIGHBOR,
RepairMethod.TMC_1: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_5: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_10: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_50: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_100: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_500: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_PIPE_1: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_PIPE_5: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_PIPE_10: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_PIPE_50: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_PIPE_100: ImportanceMethod.MONTECARLO,
RepairMethod.TMC_PIPE_500: ImportanceMethod.MONTECARLO,
}
MC_ITERATIONS = {
RepairMethod.KNN_Single: 0,
RepairMethod.KNN_Interactive: 0,
RepairMethod.TMC_1: 1,
RepairMethod.TMC_5: 5,
RepairMethod.TMC_10: 10,
RepairMethod.TMC_50: 50,
RepairMethod.TMC_100: 100,
RepairMethod.TMC_500: 500,
RepairMethod.TMC_PIPE_1: 1,
RepairMethod.TMC_PIPE_5: 5,
RepairMethod.TMC_PIPE_10: 10,
RepairMethod.TMC_PIPE_50: 50,
RepairMethod.TMC_PIPE_100: 100,
RepairMethod.TMC_PIPE_500: 500,
}
KEYWORD_REPLACEMENTS = {
"random": "Random",
"shapley-tmc": "Shapley TMC",
"shapley-knn-single": "Shapley KNN Single",
"shapley-knn-interactive": "Shapley KNN Interactive",
"shapley-tmc-001": "Shapley TMC x1",
"shapley-tmc-005": "Shapley TMC x5",
"shapley-tmc-010": "Shapley TMC x10",
"shapley-tmc-050": "Shapley TMC x50",
"shapley-tmc-100": "Shapley TMC x100",
"shapley-tmc-500": "Shapley TMC x500",
"shapley-tmc-pipe-001": "Shapley TMC Pipe x1",
"shapley-tmc-pipe-005": "Shapley TMC Pipe x5",
"shapley-tmc-pipe-010": "Shapley TMC Pipe x10",
"shapley-tmc-pipe-050": "Shapley TMC Pipe x50",
"shapley-tmc-pipe-100": "Shapley TMC Pipe x100",
"shapley-tmc-pipe-500": "Shapley TMC Pipe x500",
"eqodds": "Equalized Odds Difference",
"importance_compute_time": "Importance Compute Time [s]",
"steps": "Repair Steps Taken",
"steps_rel": "Relative Repair Steps Taken",
"acc": "Accuracy",
"eqodds-acc": "Accuracy + Equalized Odds Difference",
}
DEFAULT_SEED = 1
DEFAULT_CHECKPOINTS = 100
DEFAULT_PROVIDERS = 0
DEFAULT_TIMEOUT = 3600
DEFAULT_MODEL = ModelType.LogisticRegression
class DatascopeScenario(Scenario):
def __init__(
self,
dataset: str,
pipeline: str,
method: RepairMethod,
utility: UtilityType,
iteration: int,
model: ModelType = DEFAULT_MODEL,
seed: int = DEFAULT_SEED,
trainsize: int = DEFAULT_TRAINSIZE,
valsize: int = DEFAULT_VALSIZE,
testsize: int = DEFAULT_TESTSIZE,
timeout: int = DEFAULT_TIMEOUT,
checkpoints: int = DEFAULT_CHECKPOINTS,
providers: int = DEFAULT_PROVIDERS,
evolution: Optional[pd.DataFrame] = None,
importance_compute_time: Optional[float] = None,
**kwargs: Any
) -> None:
super().__init__(**kwargs)
self._dataset = dataset
self._pipeline = pipeline
self._method = method
self._utility = utility
self._iteration = iteration
self._model = model
self._seed = seed
self._trainsize = trainsize
self._valsize = valsize
self._testsize = testsize
self._timeout = timeout
self._checkpoints = checkpoints
self._providers = providers
self._evolution = pd.DataFrame()  # api: pandas.DataFrame
import numpy as np
import pandas as pd
import joblib
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def impute_age(cols):
age=cols[0]
pClass = cols[1]
if pd.isnull(age):
if pClass == 1:
return 37
elif pClass == 2:
return 29
else:
return 24
else:
return age
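# Hedged usage note (not part of the original): the imputer falls back to a
# per-class typical age when the value is missing, e.g.
# impute_age([np.nan, 1]) -> 37 and impute_age([25, 3]) -> 25.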
train_df = pd.read_csv('titanic_train.csv')
train_df['Age'] = train_df[['Age','Pclass']].apply(impute_age,axis=1)
gender = pd.get_dummies(train_df['Sex'],drop_first=True)
ebarked = pd.get_dummies(train_df['Embarked'],drop_first=True)  # api: pandas.get_dummies
import glob
import pandas as pd
files = glob.glob('Corpus_mda/*')
files.sort()
df_agg1 = pd.DataFrame()
for i, file in enumerate(files[0:2000]):
# print(i)
df_agg1 = df_agg1.append(pd.read_pickle(file))
df_agg1.to_pickle('mda_agg/mda_agg1.pkl')
df_agg2 = pd.DataFrame()  # api: pandas.DataFrame
# The EsmcolValidate class defined below is an adaptation of the
# stac-validator: https://github.com/sparkgeo/stac-validator
# For reference, here is a copy of the stac-validator copyright notice:
# Copyright 2019 Sparkgeo
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import sys
import tempfile
from functools import lru_cache
from json.decoder import JSONDecodeError
from pathlib import Path
from urllib.parse import urlparse
import requests
from jsonschema import ValidationError, validate
logger = logging.getLogger(__name__)
class Esmcol:
def __init__(self, version='master', input_type='', filename=''):
self.input_type = input_type
self.filename = filename
self.version = version
self._determine_version()
def _determine_version(self):
git_base_url = f'https://raw.githubusercontent.com/NCAR/esm-collection-spec/{self.version}'
self.COLLECTION_URL = f'{git_base_url}/collection-spec/{self.input_type}/{self.filename}'
@classmethod
def collection_schema_url(cls, version, filename='collection.json'):
return cls(version, 'json-schema', filename).COLLECTION_URL
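# Hedged usage note (not part of the original): the classmethod above simply
# builds the raw-GitHub URL for a given spec version, e.g.
#   Esmcol.collection_schema_url('master')
#   -> 'https://raw.githubusercontent.com/NCAR/esm-collection-spec/master/collection-spec/json-schema/collection.json'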
class VersionException(Exception):
pass
class EsmcolValidate:
def __init__(self, esmcol_file, esmcol_spec_dirs=None, version='master', log_level='CRITICAL'):
""" Validate an ESMCol file
Parameters
----------
esmcol_file : str
File to validate
esmcol_spec_dirs : list
List of local specification directories to check for JSON schema files.
version : str, defaults to `master`
ESMcat version to validate against. Uses github tags from the esm-collection-repo. e.g.: v0.1.0
log_level : str
Level of logging to report
"""
numeric_log_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_log_level, int):
raise ValueError(f'Invalid log level: {log_level}')
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(thread)d : %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=numeric_log_level,
)
logging.info('ESMCol Validator Started.')
self.esmcat_version = version
self.esmcol_file = esmcol_file.strip()
self.dirpath = tempfile.mkdtemp()
self.esmcol_spec_dirs = self.check_none(esmcol_spec_dirs)
self.message = []
self.status = {
'collections': {'valid': 0, 'invalid': 0},
'catalogs': {'valid': 0, 'invalid': 0},
'unknown': 0,
}
@staticmethod
def check_none(input):
""" Checks if the string is None
Parameters
----------
input : str
Input string to check
"""
if input == 'None':
return None
try:
return input.split(',')
except AttributeError:
return input
except Exception:
logger.warning('Could not find input file')
@lru_cache(maxsize=48)
def fetch_spec(self, spec):
""" Get the spec file and cache it.
Parameters
----------
spec : str
Name of spec to get
Returns
-------
ESMCol spec in json format
"""
spec_name = spec
if self.esmcol_spec_dirs is None:
try:
logging.debug('Gathering ESMCol specs from remote.')
url = getattr(Esmcol, f'{spec_name}_schema_url')
spec = requests.get(url(self.esmcat_version)).json()
valid_dir = True
except Exception:
logger.exception('ESMCol Download Error')
raise VersionException(f'Could not download ESMCol specification')
else:
valid_dir = False
for esmcol_spec_dir in self.esmcol_spec_dirs:
spec_file = Path(esmcol_spec_dir) / f'{spec_name}.json'
if spec_file.is_file():
valid_dir = True
try:
logging.debug('Gather ESMCol specs from local directory.')
with open(spec_file, 'r') as f:
spec = json.load(f)
except FileNotFoundError:
try:
logger.critical(
f'Houston, we have a problem! Could not find spec file {spec_file}'
)
url = getattr(Esmcol, f'{spec_name}_schema_url')
spec = requests.get(url(self.esmcat_version)).json()
except:
logger.exception(
'The ESMCol specification file does not exist or does not match the ESMCol file you are trying '
'to validate. Please check your esmcol_spec_dirs path.'
)
sys.exit(1)
except Exception as e:
logging.exception(e)
if valid_dir:
file_name = (
Path(self.dirpath) / f"{spec_name}_{self.esmcat_version.replace('.','_')}.json"
)
with open(file_name, 'w') as fp:
logging.debug(f'Copying {spec_name} spec from local file to cache')
fp.write(json.dumps(spec))
else:
logger.exception(
'The ESMCol specification file does not exist or does not match the ESMCol file you are trying '
'to validate. Please check your esmcol_spec_dirs path.'
)
sys.exit(1)
return spec
def validate_json(self, content, schema):
""" Validate ESMCol
Parameters
----------
content : dict
input ESMCol file content
schema : dict
schema of ESMCol
Returns
-------
validation message
"""
try:
logging.info('Validating ESMCol')
validate(content, schema)
return True, None
except ValidationError as e:
logger.warning('ESMCol Validation Error')
return False, f'{e.message} of {list(e.path)}'
except Exception as e:
logger.exception('ESMCol error')
return False, f'{e}'
def validate_catalog(self, content):
"""
Validate Catalog content
Parameters
----------
content : dict
input ESMCol file content
Returns
-------
validation message
"""
import pandas as pd
try:
if 'catalog_file' in content:
catalog_content = content['catalog_file']
df = pd.read_csv(catalog_content, index_col=0)  # api: pandas.read_csv
# %%
import os
import urllib
from bs4 import BeautifulSoup
import pandas as pd
import yfinance as yf
import pandas_datareader as dtr
import datetime
import time
from tqdm import tqdm
from copy import deepcopy
from talib import WILLR
from talib import EMA
# %%
HEADERS = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
# %%
data = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
# %%
summary_table = data[0]
tickers = summary_table['Symbol'].values
# %%
sample_tickers = tickers[:10]
# %%
tickers_df = pd.DataFrame(tickers)  # api: pandas.DataFrame
import numpy as np
import pandas as pd
from sklearn import linear_model
def get_round(df, num_teams):
matches = int(num_teams / 2)
matchday_list = sum([[i] * matches for i in range(1, 50)], [])
df["round"] = matchday_list[:df.shape[0]]
return df
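# --- Hedged illustration (not part of the original): with num_teams=4 there are
# 2 matches per round, so a 6-row fixture table is labelled 1, 1, 2, 2, 3, 3.
def _demo_get_round():
    df = pd.DataFrame({"HomeTeam": list("ABCDEF"), "AwayTeam": list("FEDCBA")})
    return get_round(df, num_teams=4)["round"].tolist()  # [1, 1, 2, 2, 3, 3]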
def get_team_encoding(df):
"""
Creates unique team_Ids for the teams.
Creates a variable Result which is home_goals - away_goals
"""
labels, levels = pd.factorize(pd.concat([df["HomeTeam"], df["AwayTeam"]]))  # api: pandas.concat
"""
Training script for scene graph detection. Integrated with my faster rcnn setup
"""
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
from torch import optim
import torch
import pandas as pd
import time
import os
from tensorboardX import SummaryWriter
from config import ModelConfig, BOX_SCALE, IM_SCALE
from torch.nn import functional as F
from lib.pytorch_misc import optimistic_restore, de_chunkize, clip_grad_norm
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from lib.pytorch_misc import print_para
from torch.optim.lr_scheduler import ReduceLROnPlateau
from lib.pytorch_misc import set_random_seed, log_depth_details, \
add_module_summary, remove_params
# -- Get model configuration
conf = ModelConfig()
# -- Set random seed
if conf.rnd_seed is not None:
set_random_seed(conf.rnd_seed)
# -- Import the specified model
if conf.model == 'motifnet':
from lib.rel_model import RelModel
elif conf.model == 'stanford':
from lib.rel_model_stanford import RelModelStanford as RelModel
# -- Depth-Fusion models --
elif conf.model == 'shz_depth':
from lib.shz_models.rel_model_depth import RelModel
elif conf.model == 'shz_depth_union':
from lib.shz_models.rel_model_depth_union import RelModel
elif conf.model == 'shz_fusion':
from lib.shz_models.rel_model_fusion import RelModel
elif conf.model == 'shz_fusion_beta':
from lib.shz_models.rel_model_fusion_beta import RelModel
# --
else:
raise ValueError()
# -- Create Tensorboard summary writer
writer = SummaryWriter(comment='_run#'+ conf.save_dir.split('/')[-1])
# -- Create dataset splits and dataset loader
train, val, _ = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
use_proposals=conf.use_proposals,
filter_non_overlap=conf.mode == 'sgdet',
# -- Depth dataset parameters
use_depth=conf.load_depth,
three_channels_depth=conf.pretrained_depth)
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus,
# -- Depth dataset parameters
use_depth=conf.load_depth)
# -- Create the specified Relation-Detection model
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
use_resnet=conf.use_resnet, order=conf.order,
nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
use_proposals=conf.use_proposals,
pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
pooling_dim=conf.pooling_dim,
rec_dropout=conf.rec_dropout,
use_bias=conf.use_bias,
use_tanh=conf.use_tanh,
use_vision=conf.use_vision,
# -- The proposed model parameters
depth_model=conf.depth_model,
pretrained_depth=conf.pretrained_depth,
active_features=conf.active_features,
frozen_features=conf.frozen_features,
use_embed=conf.use_embed)
# -- Freeze the detector (Faster-RCNN)
for n, param in detector.detector.named_parameters():
param.requires_grad = False
# -- Print model parameters
print(print_para(detector), flush=True)
# -- Define training related functions
def is_conv_param_depth(name):
"""
Checks if the provided parameter is in the convolutional parameters list
:param name: parameter name
:return: `True` if the parameter is in the list
"""
if conf.depth_model in ["resnet18", "resnet50"]:
depth_conv_params = ['depth_backbone',
'depth_rel_head',
'depth_rel_head_union',
'depth_union_boxes']
else:
depth_conv_params = ['depth_backbone',
'depth_union_boxes']
for param in depth_conv_params:
if name.startswith(param):
return True
return False
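# Hedged usage note (not part of the original): the prefix check above means e.g.
# is_conv_param_depth('depth_backbone.layer1.0.conv1.weight') is True, while a
# fully connected name such as 'fusion_hlayer.weight' falls through to False.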
def get_optim(lr):
# Lower the learning rate on the VGG fully connected layers by 1/10th. It's a hack, but it helps
# stabilize the models.
if conf.model.startswith('depth'):
fc_params = [p for n,p in detector.named_parameters()
if not is_conv_param_depth(n) and p.requires_grad]
non_fc_params = [p for n, p in detector.named_parameters()
if is_conv_param_depth(n) and p.requires_grad]
else:
fc_params = [p for n, p in detector.named_parameters() if n.startswith('roi_fmap') and p.requires_grad]
non_fc_params = [p for n, p in detector.named_parameters() if not n.startswith('roi_fmap') and p.requires_grad]
# -- Show the number of FC/non-FC parameters
print("#FC params:{}, #non-FC params:{}".format(len(fc_params),
len(non_fc_params)))
params = [{'params': fc_params, 'lr': lr / 10.0}, {'params': non_fc_params}]
if conf.adam:
optimizer = optim.Adam(params, lr=lr)
else:
optimizer = optim.SGD(params, weight_decay=conf.l2, lr=lr, momentum=0.9)
scheduler = ReduceLROnPlateau(optimizer, 'max', patience=6, factor=0.1,
verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1)
return optimizer, scheduler
# -- The parameters to be removed from the provided checkpoint
rm_params = ['rel_out.bias',
'rel_out.weight',
'fusion_hlayer.bias',
'fusion_hlayer.weight']
# -- Load the checkpoint if it's provided
start_epoch = -1
if conf.ckpt is not None:
ckpt = torch.load(conf.ckpt)
# -- If the provided checkpoint is `vg-faster-rcnn`
if conf.ckpt.endswith("vg-faster-rcnn.tar"):
print("Loading Faster-RCNN checkpoint...")
start_epoch = -1
optimistic_restore(detector.detector, ckpt['state_dict'])
# -- Load different heads' weights from faster r-cnn
if hasattr(detector, "roi_fmap") and detector.roi_fmap is not None:
detector.roi_fmap[1][0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap[1][3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap[1][0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap[1][3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
if hasattr(detector, "roi_fmap_obj") and detector.roi_fmap_obj is not None:
detector.roi_fmap_obj[0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap_obj[3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap_obj[0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap_obj[3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
# -- Otherwise
else:
print("Loading everything...")
start_epoch = ckpt['epoch']
# -- Attach the extra checkpoint if it's provided
if conf.extra_ckpt is not None:
print("Attaching the extra checkpoint to the main one!")
extra_ckpt_state_dict = torch.load(conf.extra_ckpt)
ckpt['state_dict'].update(extra_ckpt_state_dict['state_dict'])
# -- Remove unwanted weights from state_dict (last two layers)
if not conf.keep_weights:
remove_params(ckpt['state_dict'], rm_params)
# -- Load the checkpoint
if not optimistic_restore(detector, ckpt['state_dict']):
start_epoch = -1
detector.cuda()
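# -- Hedged sketch of what `remove_params` is assumed to do (the real helper is
# -- imported from elsewhere in the repo): drop the listed keys from the state dict
# -- so the final prediction layers are re-initialised instead of loaded.
# --   def remove_params(state_dict, names):
# --       for name in names:
# --           state_dict.pop(name, None)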
def train_epoch(epoch_num):
detector.train()
tr = []
start = time.time()
# -- Early logging to the tensorboard
if conf.tensorboard_ex:
log_depth_details(detector, None, writer)
# -- *** ADD OTHER MODULES HERE ***
if hasattr(detector, "fusion_hlayer") and detector.fusion_hlayer is not None:
add_module_summary(detector.fusion_hlayer, writer, "fusion_hlayer")
if hasattr(detector, "rel_out") and detector.rel_out is not None:
add_module_summary(detector.rel_out, writer, "rel_out")
for b, batch in enumerate(train_loader):
tr.append(train_batch(batch, verbose=b % (conf.print_interval*10) == 0))
if b % conf.print_interval == 0 and b >= conf.print_interval:
mn = | pd.concat(tr[-conf.print_interval:], axis=1) | pandas.concat |
import pandas as pd
import sys
import utils
import config
nrows = None
tr = utils.load_df(config.data+'train.csv',nrows=nrows)
te = utils.load_df(config.data+'test.csv',nrows=nrows)
actions = ['interaction item image','interaction item info','interaction item deals','interaction item rating','search for item']
df = pd.concat([tr,te])
df_out = df[['session_id']]
trs = utils.load_df(config.feat+'m3_tr_0.ftr')
tes = utils.load_df(config.feat+'m3_te_0.ftr')
df_sample = | pd.concat([trs,tes]) | pandas.concat |
# coding: utf-8
import numpy as np
import pandas as pd
import os
import time
import multiprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn import preprocessing
from utils import check_and_make_path
# Generate data used for node classification
class DataGenerator(object):
base_path: str
input_base_path: str
output_base_path: str
label_base_path: str
file_sep: str
full_node_list: list
node2idx_dict: dict
node_num: int
train_ratio: float
val_ratio: float
test_ratio: float
def __init__(self, base_path, input_folder, output_folder, node_file, label_folder, file_sep='\t', train_ratio=0.7, val_ratio=0.2, test_ratio=0.1):
self.base_path = base_path
self.input_base_path = os.path.abspath(os.path.join(base_path, input_folder))
self.output_base_path = os.path.abspath(os.path.join(base_path, output_folder))
self.label_base_path = os.path.abspath(os.path.join(base_path, label_folder))
self.file_sep = file_sep
node_file_path = os.path.abspath(os.path.join(base_path, node_file))
nodes_set = pd.read_csv(node_file_path, names=['node'])
self.full_node_list = nodes_set['node'].tolist()
self.node_num = len(self.full_node_list)
self.node2idx_dict = dict(zip(self.full_node_list, np.arange(self.node_num)))
assert train_ratio + test_ratio + val_ratio <= 1.0
self.train_ratio = train_ratio
self.val_ratio = val_ratio
self.test_ratio = test_ratio
check_and_make_path(self.input_base_path)
check_and_make_path(self.output_base_path)
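    # Hedged usage sketch (folder and file names below are assumptions, not taken
    # from the source):
    #   generator = DataGenerator(base_path='data/my_graph', input_folder='1.format',
    #                             output_folder='nodeclas', node_file='nodes_set/nodes.csv',
    #                             label_folder='label')
    #   generator.generate_node_samples('2019.csv', sep='\t')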
def generate_node_samples(self, file_name, sep='\t'):
date = file_name.split('.')[0]
file_path = os.path.join(self.label_base_path, file_name)
df_nodes = pd.read_csv(file_path, sep=sep, header=0, names=['node', 'label'])
node_num = df_nodes.shape[0]
df_nodes['node'] = df_nodes['node'].apply(lambda x: self.node2idx_dict[x])
node_arr = df_nodes['node'].values
label_arr = df_nodes['label'].values
node_indices = np.arange(node_num)
np.random.shuffle(node_indices)
train_num = int(np.floor(node_num * self.train_ratio))
val_num = int(np.floor(node_num * self.val_ratio))
test_num = int(np.floor(node_num * self.test_ratio))
train_indices = node_indices[: train_num]
train_nodes, train_labels = node_arr[train_indices], label_arr[train_indices]
val_indices = node_indices[train_num: train_num + val_num]
val_nodes, val_labels = node_arr[val_indices], label_arr[val_indices]
test_indices = node_indices[train_num + val_num: train_num + val_num + test_num]
test_nodes, test_labels = node_arr[test_indices], label_arr[test_indices]
train_output_path = os.path.join(self.output_base_path, date + '_train.csv')
df_train = pd.DataFrame({'node': train_nodes, 'label': train_labels})
df_train.to_csv(train_output_path, sep=self.file_sep, index=False)
test_output_path = os.path.join(self.output_base_path, date + '_test.csv')
df_test = pd.DataFrame({'node': test_nodes, 'label': test_labels})
df_test.to_csv(test_output_path, sep=self.file_sep, index=False)
val_output_path = os.path.join(self.output_base_path, date + '_val.csv')
df_val = | pd.DataFrame({'node': val_nodes, 'label': val_labels}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
def load_stats_dataframe(files, aggregated_results=None):
if os.path.exists(aggregated_results) and all([os.path.getmtime(f) < os.path.getmtime(aggregated_results) for f in files]):
return | pd.read_pickle(aggregated_results) | pandas.read_pickle |
import glob
import os
import sys
# these imports and usings need to be in the same order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
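    # survey_X / prop_all is a (date x state) table of the proportion of respondents
    # answering "Always" to the micro-distancing question: rows are dates from
    # 2020-03-01 onwards, columns are the jurisdictions present in the survey data.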
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
    # Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
        # cap min and max at historical extremes or (-50, 10)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
# set adjusted baselines by eyeline for now, need to get this automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
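        # A sketch of the update rule implemented above for the default (empty) scenario,
        # per forecast day i and mobility sample n:
        #   p     = (n_forecast - i) / n_forecast                 # trend weight, decays to 0
        #   trend ~ MVN(mu, cov)                                  # continue recent trend
        #   rev   ~ MVN(0.05 * (R_baseline_mean - current), cov)  # pull back to baseline
        #   current <- clip(current + p * trend + (1 - p) * rev, minRmed, maxRmed)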
        # forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
## currently not forecasting masks — may return in the future but will need to assess.
# forecast mask wearing compliance
        # Get a baseline value of mask wearing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
                # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
                        # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
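        # The Beta draws above use a method-of-moments parameterisation: for a forecast
        # mean m and variance v (= var_vax), setting
        #   a = m * (m * (1 - m) / v - 1)
        #   b = (1 - m) * (m * (1 - m) / v - 1)
        # gives Beta(a, b) with mean m and variance v, so each day's sampled VE is
        # centred on the forecast mean with the same noise level assumed in the fitting.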
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
elif i == len(predictors) + 1: # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the ve time series and create an adjusted timeseries from March 1st
# that includes no effect prior
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
    # there are a couple of NAs early in the time series, likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
    # loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
    # a one-row dataframe of ones covering the dates before any vaccine effect (reused for each of the 8 jurisdictions below)
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = {state}
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
    # there are a couple of NAs early in the time series, likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
    # loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
    # a one-row dataframe of ones covering the dates before any vaccine effect (reused for each of the 8 jurisdictions below)
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = {state}
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
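    # For reference, the two candidate forms above are:
    #   exponential decay:  M_d = (1 + theta_md) ** (-prop)
    #   logistic:           M_d = 2 * expit(-theta_md * prop)
    # where prop is the forecast proportion of respondents always micro-distancing.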
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract 10 days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
            # count the days on/after the Omicron start date (the else 0 is due to QLD having no Omicron potential)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
            # df_R is already sorted by date above; rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
# take right size of md to be N by N
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
            # use the current state's forecast mask-wearing proportions
            masks_prop_sim = df_masks[state].values
            masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
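            # vax_idx_ranges maps each third-wave state to its slice of the flattened
            # (states concatenated in order) vaccine-effect sample vector, e.g. if
            # third_days were {'ACT': 10, 'NSW': 20, ...} the ACT rows would be 0-9
            # and the NSW rows 10-29 (illustrative numbers only).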
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
            # now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
            # construct a range of dates for Omicron starting at the later of the state's third-wave start date and the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
            # now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
(v >= pd.to_datetime(omicron_start_date)).sum()
for v in third_date_range.values()
],
)
)
idx = {}
kk = 0
for k in third_date_range.keys():
idx[k] = range(days_into_omicron[kk], days_into_omicron[kk + 1])
kk += 1
# tile the reduction in vaccination effect for omicron (i.e. VE is (1+r)*VE)
voc_vacc_product = np.zeros_like(vacc_ts_delta)
# calculate the voc effects
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# sample the right R_L
sim_R = samples["R_Li[" + state_key[state] + "]"].values
for n in range(mob_samples):
# add gaussian noise to predictors before forecast
# df_state.loc[
df_state.loc[df_state.date < mob_forecast_date, predictors] = (
state_Rmed[state][:, :, n] / 100
)
# add gaussian noise to predictors after forecast
df_state.loc[df_state.date >= mob_forecast_date, predictors] = (
state_sims[state][:, :, n] / 100
)
## ADVANCED SCENARIO MODELLING - USE ONLY FOR POINT ESTIMATES
# set non-grocery values to 0
if advanced_scenario_modelling:
df_state.loc[:, predictors[0]] = 0
df_state.loc[:, predictors[2]] = 0
df_state.loc[:, predictors[3]] = 0
df_state.loc[:, predictors[4]] = 0
df1 = df_state.loc[df_state.date <= ban]
X1 = df1[predictors] # N by K
md[: X1.shape[0], :] = 1
if n == 0:
                    # initialise arrays (logodds)
# N by K times (Nsamples by K )^T = Ndate by Nsamples
logodds = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
logodds = np.append(logodds, X2 @ post_values[:, n], axis=0)
logodds = np.append(logodds, X3 @ post_values[:, n], axis=0)
else:
                    # concatenate to pre-existing logodds matrix
logodds1 = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
prop2 = df_md.loc[ban:new_pol, state].values
prop3 = df_md.loc[new_pol:, state].values
logodds2 = X2 @ post_values[:, n]
logodds3 = X3 @ post_values[:, n]
logodds_sample = np.append(logodds1, logodds2, axis=0)
logodds_sample = np.append(logodds_sample, logodds3, axis=0)
# concatenate to previous
logodds = np.vstack((logodds, logodds_sample))
            # create a matrix of mob_samples realisations which is an indicator of the voc (delta right now)
            # which is 1 up until the voc_start_date and then takes values from the posterior sample
voc_multiplier_alpha = samples["voc_effect_alpha"].values
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# number of days into omicron forecast
tt = 0
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
tmp_date = pd.to_datetime("2020-03-01")
# get the correct Omicron start date
# omicron_start_date_tmp = np.maximum(
# pd.to_datetime(omicron_start_date),
# pd.to_datetime(third_date_range[state][0]),
# )
omicron_start_date_tmp = pd.to_datetime(omicron_start_date)
omicron_start_day_tmp = (
pd.to_datetime(omicron_start_date_tmp) - pd.to_datetime(start_date)
).days
for ii in range(mob_samples):
                # before Omicron is introduced in a jurisdiction, we consider which period we're in:
# 1. Wildtype
# 2. Alpha
# 3. Delta
voc_vacc_product[:, ii] = vacc_ts_delta[:, ii]
idx_start = df_state.loc[df_state.date < alpha_start_date].shape[0]
idx_end = df_state.loc[df_state.date < delta_start_date].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_alpha[ii]
idx_start = idx_end
idx_end = df_state.loc[df_state.date < omicron_start_date_tmp].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
idx_start = idx_end
idx_end = np.shape(voc_vacc_product)[0]
if strain == "Delta":
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
elif strain == "Omicron":
# if omicron we need to account for the Omicron VE prior to the introduction of
# omicron in mid November
voc_vacc_product[idx_start:idx_end, ii] = (
vacc_ts_omicron[idx_start:idx_end, ii] * voc_multiplier_omicron[ii]
)
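            # voc_vacc_product now holds, per day and mobility sample, the combined
            # variant-transmissibility x vaccination-effect multiplier for the chosen
            # strain; downstream (outside this excerpt) it would scale the baseline
            # R_Li alongside the mobility, micro-distancing and mask-wearing terms.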
# save the components of the TP
| pd.DataFrame(sim_R) | pandas.DataFrame |
# coding: utf-8
# In[1]:
from __future__ import division, print_function, absolute_import
from past.builtins import basestring
import os
import gzip
import pandas as pd
from twip.constant import DATA_PATH
from gensim.models import TfidfModel, LsiModel
from gensim.corpora import Dictionary
# In[2]:
import matplotlib
from IPython.display import display, HTML
get_ipython().magic(u'matplotlib inline')
np = pd.np
display(HTML("<style>.container { width:100% !important; }</style>"))
pd.set_option('display.max_rows', 6)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 800)
pd.set_option('precision', 2)
get_ipython().magic(u'precision 4')
get_ipython().magic(u'pprint')
# In[3]:
from sklearn.linear_model import SGDRegressor
from sklearn.svm import SVR
# In[6]:
lsi = LsiModel.load(os.path.join(DATA_PATH, 'lsi100'))
lsi2 = LsiModel.load(os.path.join(DATA_PATH, 'lsi2'))
# In[7]:
with gzip.open(os.path.join(DATA_PATH, 'tweet_topic_vectors.csv.gz'), 'rb') as f:
    topics = pd.DataFrame.from_csv(f, encoding='utf8')  # pandas.DataFrame.from_csv
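# (DataFrame.from_csv is the legacy pandas reader; later releases deprecate it in
#  favour of pd.read_csv -- here roughly pd.read_csv(f, index_col=0,
#  parse_dates=True, encoding='utf8').)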
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
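    # For example (illustrative): on the Series-based subclass,
    # self._construct(4, value=1) yields a length-4 Series of ones, while on the
    # DataFrame-based subclass the same call yields a 4x4 frame of ones, because
    # the scalar shape is broadcast across all self._ndim axes.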
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
        # (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
        # Check for stability when it receives a seed or random state -- run 10
        # times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
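        # (A string passed as `weights` is resolved to the column of that name;
        # all of the weight above sits on row 5, hence df.iloc[5:6].)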
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
        # Check that weights that don't sum to one are re-normalized.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
        # Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
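        # (The mixed int/str/float values give the Series object dtype, so none
        # of it is treated as numeric and an empty result is returned.)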
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
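        # (With limit=1 and limit_direction='both', one NaN is filled from each
        # side of the gap -- the 5. from the left and the 9. from the right --
        # leaving the middle NaN untouched, as the expected Series encodes.)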
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to the beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
descDs = descD.sort_index() # the index order change for mixed-types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
        with tm.assertRaises(ValueError)  # pandas.util.testing.assertRaises
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
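# Unlike plain tuple comparison, the helper above treats two NaN fields as equal,
# which matters because empty OrderResult fields are encoded as np.nan below.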
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
    # Calculations: fees, fixed fees, slippage, size types, and lock_cash
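    # Informal sanity check of the first case below (not asserted directly): with cash=100,
    # fees=0.1, fixed_fees=1 and slippage=0.1 at price 10, the adjusted buy price is
    # 10 * (1 + 0.1) = 11 and the largest affordable size solves size * 11 * 1.1 + 1 = 100,
    # i.e. size = 99 / 12.1 ≈ 8.1818 with total fees 0.1 * (8.1818 * 11) + 1 = 10.0,
    # matching the ExecuteOrderState and OrderResult values checked here.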
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
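# Note on size semantics in from_orders: np.inf is treated as "invest all available cash",
# -np.inf as "sell or short with everything available", and np.nan as "no order on this bar".
# The fixtures above exercise exactly these cases, as the order records asserted below show.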
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
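# The three helpers above only pin the `direction` argument of Portfolio.from_orders, so e.g.
# from_orders_both(size=1) is shorthand for vbt.Portfolio.from_orders(price, 1, direction='both').
# TestFromOrders reuses them to compare the same inputs across all three directions.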
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
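        # The assertions below only check equivalences: val_price=np.inf should behave like
        # valuing at the current order price, while val_price=-np.inf should behave like
        # valuing at the previous close (forward-filled over NaNs unless ffill_val_price=False).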
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
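        # lock_cash=True is expected to cap the order size so that free cash never drops below
        # zero (relevant when shorting and when cash is shared within a group); in the
        # free-cash matrices asserted below, only the lock_cash=True variants stay non-negative.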
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
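        # With call_seq='auto', columns within a group are expected to be processed so that
        # sell orders execute before buy orders on each bar, letting freed-up cash fund the
        # purchases of the same group; the call_seq matrices asserted below reflect that ordering.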
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
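        # Since sells run before buys under call_seq='auto', the portfolio should be able to hit
        # the target values (and target percentages) exactly on every bar, which is what the two
        # frame comparisons below assert.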
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
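# Note that the entry and exit fixtures overlap on 2020-01-03 (both True), so the tests below
# also exercise bars with simultaneous entry and exit signals.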
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
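# The plain from_signals_* helpers drive Portfolio.from_signals through its `direction`
# argument, while the from_ls_* helpers pass the same signals as separate long/short entry
# and exit arrays. TestFromSignals runs many cases through both paths (via the `test_ls`
# parametrization) to check that the two signal interfaces produce identical records.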
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(ts_stop=-0.1)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
            _ = from_signals_both(tp_stop=-0.1)
        close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
from logging import getLogger
logger = getLogger(__name__)
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import warnings
from .plot import annotate_points, _def_label_alignment
import seaborn as sns
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
from scipy.stats import chi2
def confidence_ellipse(x, y, ax, ci=0.95, color="red", facecolor="none", **kwargs):
"""
Create a plot of the covariance confidence ellipse of *x* and *y*.
Parameters
----------
x, y : array-like, shape (n, )
Input data.
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
    ci : float
        Confidence level (e.g. 0.95) used to scale the ellipse radii via the
        chi-squared distribution with two degrees of freedom.
    color, facecolor : str
        Edge and face colors of the ellipse.
**kwargs
Forwarded to `~matplotlib.patches.Ellipse`
Returns
-------
matplotlib.patches.Ellipse
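
    Examples
    --------
    A minimal sketch; ``x``, ``y`` and the axes here are illustrative::

        fig, ax = plt.subplots()
        ax.scatter(x, y)
        confidence_ellipse(x, y, ax, ci=0.95, color="red")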
"""
if ax is None:
ax = plt.gca()
if len(x) < 4:
raise Exception("need more than 3 data points")
if x.size != y.size:
raise ValueError("x and y must be the same size")
cov = np.cov(x, y)
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
# Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse(
(0, 0),
width=ell_radius_x * 2,
height=ell_radius_y * 2,
facecolor=facecolor,
edgecolor=color,
**kwargs,
)
s = chi2.ppf(ci, 2)
    # Scale x by its standard deviation (square root of the variance)
    # times the chi-squared quantile for the requested confidence level.
scale_x = np.sqrt(cov[0, 0] * s)
mean_x = np.mean(x)
    # ... and apply the same scaling to y.
scale_y = np.sqrt(cov[1, 1] * s)
mean_y = np.mean(y)
transf = (
transforms.Affine2D()
.rotate_deg(45)
.scale(scale_x, scale_y)
.translate(mean_x, mean_y)
)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse)
def plot_confidence_ellipses(
x,
y,
groups,
order=None,
colors=None,
confidence_interval=0.95,
facecolor="none",
ax=None,
**kwargs,
):
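    """Draw one covariance confidence ellipse per group of (x, y) points.

    ``order`` fixes the group order and ``colors`` provides one color per group
    (a seaborn palette by default); a new axes is created when ``ax`` is None.
    Extra keyword arguments are forwarded to ``confidence_ellipse``.
    """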
x = np.array(x)
y = np.array(y)
if ax is None:
ax = plt.subplot(111)
if order is None:
order = np.unique(groups)
if colors is None:
colors = sns.color_palette(n_colors=len(order))
if kwargs is None:
kwargs = {}
for n, g in enumerate(order):
confidence_ellipse(
x[groups == g],
y=y[groups == g],
ax=ax,
color=colors[n],
ci=confidence_interval,
facecolor=facecolor,
**kwargs,
)
return ax
class DimRed:
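    """Thin wrapper around a scikit-learn decomposition (PCA by default).

    Fits the decomposition on ``data`` (optionally element-wise transformed),
    stores the transformed samples in ``transformed_data`` and, when the
    estimator exposes ``components_``, the loadings in ``components``, both as
    labeled DataFrames.
    """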
def __init__(
self, data, method=PCA, transformation=None, n_components=None, **kargs
):
if n_components is None:
n_components = data.shape[0]
if data.shape[0] > data.shape[1]:
            print(
                "You don't need to reduce dimensionality, or your dataset is transposed."
)
self.decomposition = method(n_components=n_components, **kargs)
self.rawdata = data
# self.variable_names = data.columns
# self.sample_names = data.index
if transformation is None:
self.data_ = self.rawdata
else:
self.data_ = data.applymap(transformation)
Xt = self.decomposition.fit_transform(self.data_)
self.transformed_data = pd.DataFrame(
Xt[:, : (n_components + 1)],
index=data.index,
columns=np.arange(n_components) + 1,
)
name_components = ["components_"]
for name in name_components:
if hasattr(self.decomposition, name):
self.components = pd.DataFrame(
getattr(self.decomposition, name),
index=np.arange(n_components) + 1,
columns=data.columns,
)
if not hasattr(self, "components"):
            warnings.warn(
                "Couldn't define components, will not be able to plot loadings"
)
def set_axes_labels_(self, ax, components):
if hasattr(self.decomposition, "explained_variance_ratio_"):
ax.set_xlabel(
"PC {} [{:.1f} %]".format(
components[0],
self.decomposition.explained_variance_ratio_[components[0] - 1]
* 100,
)
)
ax.set_ylabel(
"PC {} [{:.1f} %]".format(
components[1],
self.decomposition.explained_variance_ratio_[components[1] - 1]
* 100,
)
)
else:
ax.set_xlabel("Component {} ".format(components[0]))
ax.set_ylabel("Component {} ".format(components[1]))
def plot_explained_variance_ratio(self, n_components=25, **kwargs):
explained_variance_ratio = self.decomposition.explained_variance_ratio_
n = min(n_components, len(explained_variance_ratio))
plt.bar(np.arange(n), explained_variance_ratio[:n], **kwargs)
ax = plt.gca()
ax.set_xlabel("Principal Component")
ax.set_ylabel("Explained Variance Ratio")
return ax
def plot_components(
self,
components=(1, 2),
ax=None,
groups=None,
plot_ellipse=False,
label_points=False,
confidence_interval=0.95,
order_groups=None,
colors=None,
**scatter_args,
):
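        """Scatter the samples on two components, optionally colored by
        ``groups``, with point labels and per-group confidence ellipses drawn
        on request.
        """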
components = list(components)
assert len(components) == 2, "expect two components"
if ax is None:
ax = plt.subplot(111)
if (groups is not None) and (order_groups is None):
order_groups = np.unique(groups)
x, y = (
self.transformed_data[components[0]],
self.transformed_data[components[1]],
)
        overwritten_seaborn_kargs = {
            "hue": "groups",
            "hue_order": "order_groups",
            "palette": "colors",
}
for k in overwritten_seaborn_kargs:
if k in scatter_args:
raise ValueError(
f"You provided `{k}` as keyword. However `{k}` is overwritten by the `{overwritten_seaborn_kargs[k]}` argument."
)
sns.scatterplot(
x=x,
y=y,
ax=ax,
hue=groups,
hue_order=order_groups,
palette=colors,
**scatter_args,
)
ax.axis("equal")
self.set_axes_labels_(ax, components)
if label_points:
annotate_points(data=self.transformed_data[components], ax=ax)
if plot_ellipse:
if groups is None:
                raise Exception("`groups` is required for plotting confidence ellipses")
plot_confidence_ellipses(
x,
y,
groups,
order=order_groups,
colors=colors,
confidence_interval=confidence_interval,
ax=ax,
)
return ax
def plot_loadings(self, components=(1, 2), ax=None, **scatter_args):
if ax is None:
ax = plt.subplot(111)
components = list(components)
assert len(components) == 2, "expect two components"
sns.scatterplot(
x=self.components.loc[components[0]],
y=self.components.loc[components[1]],
ax=ax,
**scatter_args,
)
ax.axis("equal")
self.set_axes_labels_(ax, components)
return ax
def _detect_which_arrows_to_vizualize(self, loadings, n_arrows=None):
assert loadings.shape[0] == 2
radius = np.sqrt(sum(loadings.values**2))
        radius = pd.Series(radius, self.components.columns)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 5, 2020
@authors: enzoampil & jpdeleon
"""
# Import standard library
import os
from inspect import signature
from datetime import datetime
import warnings
from pathlib import Path
from string import digits
import requests
import json
import re
# Import modules
import numpy as np
import pandas as pd
from tqdm import tqdm
from bs4 import BeautifulSoup
from pandas.io.json import json_normalize
import matplotlib.pyplot as pl
import matplotlib as mpl
# Import from package
from fastquant import get_stock_data, DATA_PATH
warnings.simplefilter("ignore")
mpl.style.use("fivethirtyeight")
COOKIES = {
"BIGipServerPOOL_EDGE": "1427584378.20480.0000",
"JSESSIONID": "r2CYuOovD47c6FDnDoxHKW60.server-ep",
}
CALENDAR_FORMAT = "%m-%d-%Y"
TODAY = datetime.now().date().strftime(CALENDAR_FORMAT)
__all__ = [
"DisclosuresPSE",
"DisclosuresInvestagrams",
"get_company_disclosures",
]
class DisclosuresPSE:
"""
Disclosures scraped from PSE
    Attributes
    ----------
disclosures_combined : pd.DataFrame
Company disclosure summary
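
    Example
    -------
    A minimal sketch; the symbol and dates below are illustrative::

        dpse = DisclosuresPSE("JFC", start_date="1-1-2020", end_date="6-30-2020")
        dpse.disclosures_combined.head()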
"""
def __init__(
self,
symbol,
disclosure_type="all",
start_date="1-1-2020",
end_date=None,
verbose=True,
clobber=False,
):
"""
Parameters
----------
symbol : str
company symbol
disclosure_type : str
type of disclosure available
start_date : str
start date with format %m-%d-%Y
end_date : str
end date with format %m-%d-%Y
"""
self.symbol = symbol.upper()
self.start_date = start_date
self.end_date = TODAY if end_date is None else end_date
self.disclosure_type = disclosure_type
self.stock_data = None
self.verbose = verbose
self.clobber = clobber
if self.verbose:
print("Pulling {} disclosures summary...".format(self.symbol))
self.files = list(
Path(DATA_PATH).glob("{}_disclosures_*.csv".format(self.symbol))
)
self.fp = Path(
DATA_PATH,
"{}_disclosures_{}_{}.csv".format(
self.symbol, self.start_date, self.end_date
),
)
self.company_disclosures = self.get_company_disclosures()
self.disclosure_types = (
self.company_disclosures["Template Name"]
.apply(_remove_amend)
.unique()
)
if self.verbose:
print(
"Found {} disclosures between {} & {} with {} types:\n{}".format(
len(self.company_disclosures),
self.start_date,
self.end_date,
len(self.disclosure_types),
self.disclosure_types,
)
)
print("Pulling details in all {} disclosures...".format(self.symbol))
self.disclosure_tables = self.get_all_disclosure_tables()
self.disclosure_tables_df = self.get_all_disclosure_tables_df()
self.disclosure_backgrounds = self.get_disclosure_details()
self.disclosure_subjects = self.get_disclosure_details(
key="Subject of the Disclosure"
)
self.disclosures_combined = self.get_combined_disclosures()
errmsg = "{} not available between {} & {}.\n".format(
self.disclosure_type, self.start_date, self.end_date
)
errmsg += "Try {}.".format(self.disclosure_types)
if self.disclosure_type != "all":
assert self.disclosure_type in self.disclosure_types, errmsg
self.page_count, self.results_count = None, None
    def __repr__(self):
        """Show the class description after instantiation."""
fields = signature(self.__init__).parameters
values = ", ".join(repr(getattr(self, f)) for f in fields)
return "{}({})".format(type(self).__name__, values)
    def get_stock_data(self, format="ohlc"):
        """Wrapper around fastquant.get_stock_data.

        Note that stock data requires dates in YYYY-MM-DD format.
        """
start_date = format_date(
self.start_date, informat=CALENDAR_FORMAT, outformat="%Y-%m-%d"
)
end_date = format_date(
self.end_date, informat=CALENDAR_FORMAT, outformat="%Y-%m-%d"
)
if self.verbose:
print("Pulling {} stock data...".format(self.symbol))
data = get_stock_data(
self.symbol,
start_date=start_date,
end_date=end_date,
format=format,
)
self.stock_data = data
return data
def get_company_disclosures_page(self, page=1):
"""
Gets company disclosures for one page
FIXME:
This can be loaded using:
cols = ['Company Name', 'Template Name', 'PSE Form Number',
'Announce Date and Time', 'Circular Number', 'edge_no', 'url']
self.company_disclosures = pd.read_csv(self.fp)[cols]
but posting request is fast anyway
"""
headers = {
"Origin": "https://edge.pse.com.ph",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-PH,en-US;q=0.9,en;q=0.8",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept": "*/*",
"Referer": "https://edge.pse.com.ph/announcements/form.do",
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
}
data = {
"pageNo": page,
"companyId": "",
"keyword": self.symbol,
"tmplNm": "",
"fromDate": self.start_date,
"toDate": self.end_date,
}
response = requests.post(
"https://edge.pse.com.ph/announcements/search.ax",
headers=headers,
cookies=COOKIES,
data=data,
)
if hasattr(response, "text"):
assert (
len(response.text) > 10
), "Empty response from edge.pse.com.ph"
html = response.text
# Indicating the parser (e.g. lxml) removes the bs warning
parsed_html = BeautifulSoup(html, "lxml")
current_page, page_count, results_count = re.findall(
r"[^A-Za-z\[\]\/\s]+",
parsed_html.find("span", {"class": "count"}).text,
)
current_page, self.page_count, self.results_count = (
int(current_page),
int(page_count),
int(results_count),
)
assert (
int(current_page) == page
), "Resulting page is not consistent with the requested page!"
table = parsed_html.find("table", {"class": "list"})
table_rows = table.find_all("tr")
lines = []
edge_nos = []
for tr in table_rows:
td = tr.find_all("td")
row = [tr.text for tr in td]
onclicks_raw = [
tr.a["onclick"]
for tr in td
if tr.a and "onclick" in tr.a.attrs.keys()
]
onclicks = [
s[s.find("('") + 2 : s.find("')")] for s in onclicks_raw
]
lines.append(row)
if onclicks:
edge_nos.append(onclicks[0])
columns = [el.text for el in table.find_all("th")]
if lines[1][0] == "no data.":
errmsg = "No disclosures between {} & {}. ".format(
self.start_date, self.end_date
)
            errmsg += "Try a longer date interval."
raise ValueError(errmsg)
df = pd.DataFrame(lines, columns=columns)
# Filter to rows where not all columns are null
df = df[df.isna().mean(axis=1) < 1]
df["edge_no"] = edge_nos
df["url"] = (
"https://edge.pse.com.ph/openDiscViewer.do?edge_no=" + df.edge_no
)
df["Announce Date and Time"] = pd.to_datetime(
df["Announce Date and Time"]
)
# ensure index starts at 0
return df.reset_index(drop=True)
def get_company_disclosures(self):
"""
Gets company disclosures for all pages
"""
first_page_df = self.get_company_disclosures_page(page=1)
print("{} pages detected!".format(self.page_count))
if self.page_count == 1:
disclosures_df = first_page_df
else:
page_dfs = [first_page_df]
# We skip the first since we already have it
for page_num in range(2, self.page_count + 1):
page_df = self.get_company_disclosures_page(page=page_num)
page_dfs.append(page_df)
pages_df = pd.concat(page_dfs).reset_index(drop=True)
disclosures_df = pages_df
return disclosures_df
def get_disclosure_file_id(self, edge_no):
"""
Returns file ID of a specified disclosure based on its edge number
ETA: 6.2 seconds per run
"""
headers = {
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "none",
"Sec-Fetch-Mode": "navigate",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-PH,en-US;q=0.9,en;q=0.8",
}
params = (("edge_no", edge_no),)
response = requests.get(
"https://edge.pse.com.ph/openDiscViewer.do",
headers=headers,
params=params,
cookies=COOKIES,
)
html = response.text
parsed_html = BeautifulSoup(html, "lxml")
s = parsed_html.iframe["src"]
file_id = s[s.find("file_id=") + 8 :]
return file_id
def get_disclosure_parsed_html(self, disclosure_file_id):
"""
Returns the bs parsed html for a disclosure given its file id
ETA: 6.55 seconds per run
"""
headers = {
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "nested-navigate",
"Referer": "https://edge.pse.com.ph/openDiscViewer.do?edge_no=8a9a820ee365687cefdfc15ec263a54d",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-PH,en-US;q=0.9,en;q=0.8",
}
params = (("file_id", disclosure_file_id),)
response = requests.get(
"https://edge.pse.com.ph/downloadHtml.do",
headers=headers,
params=params,
cookies=COOKIES,
)
html = response.text
parsed_html = BeautifulSoup(html, "lxml")
return parsed_html
def parse_stock_inventory(self, stock_inventory_str):
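        """Parse the stock-inventory block of a company summary into a
        DataFrame, stripping commas and casting the share counts to int.
        """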
stock_inventory_lol = [
row.split("\n") for row in stock_inventory_str.split("\n\n\n\n")
]
stock_inventory_df = pd.DataFrame(
stock_inventory_lol[1:], columns=stock_inventory_lol[0]
)
stock_inventory_df.iloc[:, 1] = (
stock_inventory_df.iloc[:, 1]
.apply(lambda x: x.replace(",", ""))
.astype(int)
)
return stock_inventory_df
def get_company_summary(self, edge_no):
"""
Return the company summary (at the top of edge.pse page) given edge_no
"""
file_id = self.get_disclosure_file_id(edge_no)
parsed_html = self.get_disclosure_parsed_html(file_id)
keys = []
values = []
for dt, dd in zip(
parsed_html.find_all("dt"), parsed_html.find_all("dd")
):
# Take out first token (number followed by a period)
key = " ".join(dt.text.strip().split()[1:])
value = dd.text.strip()
if "Title of Each Class\n" in value:
stock_inventory_df = self.parse_stock_inventory(value)
keys += stock_inventory_df.iloc[:, 0].values.tolist()
values += stock_inventory_df.iloc[:, 1].values.tolist()
else:
keys.append(key)
values.append(value)
company_summary_df = pd.DataFrame()
company_summary_df["key"] = keys
company_summary_df["value"] = values
return company_summary_df
def parse_table(self, table_el):
"""
Returns a table as a dataframe from a table html element
"""
table_dict = {"header": [], "value": []}
for tr in table_el.find_all("tr"):
th = None
td = None
if tr.find("th"):
th = tr.th.text
if tr.find("td"):
td = tr.td.text
table_dict["header"].append(th)
table_dict["value"].append(td)
return pd.DataFrame(table_dict)
def get_tables(self, parsed_html):
"""
Returns a list of tables as pd.DataFrame's from parsed HTML
"""
table_els = parsed_html.find_all("table")
table_dfs = []
for table_el in table_els:
table_df = self.parse_table(table_el)
table_dfs.append(table_df)
return table_dfs
def get_disclosure_tables(self, edge_no):
"""
Returns the disclosure details (at the bottom of edge.pse page) given edge_no
"""
file_id = self.get_disclosure_file_id(edge_no)
parsed_html = self.get_disclosure_parsed_html(file_id)
tables = self.get_tables(parsed_html)
k, v = [], []
for tab in tables:
header = tab.header.dropna().values
value = tab.value.dropna().values
for i, j in zip(header, value):
k.append(i)
v.append(j)
df = pd.DataFrame(np.c_[k, v], columns=["key", "value"])
return df
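    # Minimal usage sketch (hypothetical edge_no, assumes valid COOKIES are set):
    #   details = self.get_disclosure_tables("8a9a820ee365687cefdfc15ec263a54d")
    #   details[details["key"] == "Background/Description of the Disclosure"]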
def load_disclosures(self):
"""Loads disclosures data from disk and append older or newer if necessary
"""
errmsg = "No cache file found."
assert len(self.files) > 0, errmsg
data = pd.read_csv(self.files[0])
data = data.dropna(subset=["Announce Date and Time"])
newest_date = data["Announce Date and Time"].iloc[1]
oldest_date = data["Announce Date and Time"].iloc[-1]
disclosure_details = {}
# append older disclosures
older = (
oldest_date > self.company_disclosures["Announce Date and Time"]
)
idxs1 = np.flatnonzero(older)
if older.sum() > 0:
for idx in tqdm(idxs1):
edge_no = self.company_disclosures.iloc[idx]["edge_no"]
df = self.get_disclosure_tables(edge_no)
disclosure_details[edge_no] = df
# load local data from disk
# FIXME: the JSON object must be str, bytes or bytearray, not float
for key, row in data.iterrows():
try:
edge_no = row["edge_no"]
df = json_normalize(json.loads(row["disclosure_table"])).T
df = df.reset_index()
df.columns = ["key", "value"]
disclosure_details[edge_no] = df
except Exception as e:
print(e)
# append newer disclosures
newer = (
newest_date < self.company_disclosures["Announce Date and Time"]
)
idxs2 = np.flatnonzero(newer)
# append newer disclosures
if newer.sum() > 0:
for idx in tqdm(idxs2):
edge_no = self.company_disclosures.iloc[idx]["edge_no"]
df = self.get_disclosure_tables(edge_no)
disclosure_details[edge_no] = df
if self.verbose:
print("Loaded: {}".format(self.files[0]))
if (older.sum() > 1) or (newer.sum() > 1):
# remove older file
os.remove(self.files[0])
if self.verbose:
print("Deleted: {}".format(self.files[0]))
self.clobber = True
return disclosure_details
def get_all_disclosure_tables(self):
"""
Returns a dict after iterating all disclosures
"""
if (len(self.files) == 0) or self.clobber:
disclosure_details = {}
for edge_no in tqdm(self.company_disclosures["edge_no"].values):
df = self.get_disclosure_tables(edge_no)
disclosure_details[edge_no] = df
else:
disclosure_details = self.load_disclosures()
return disclosure_details
def get_all_disclosure_tables_df(self):
"""
Returns disclosure tables as a dataframe
"""
values = []
for edge_no in self.disclosure_tables.keys():
df = self.disclosure_tables[edge_no]
df_dict = {k: v for k, v in df.values}
# Convert dictionary to string
values.append(json.dumps(df_dict))
return pd.DataFrame(values, columns=["disclosure_table"])
def get_disclosure_details(
self, key="Background/Description of the Disclosure"
):
"""
Returns a dataframe of specific data from disclosure_tables
"""
values = []
for edge_no in self.disclosure_tables.keys():
df = self.disclosure_tables[edge_no]
idx = df["key"].isin([key])
value = df.loc[idx, "value"].values
values.append(value)
# dataframe is used instead of series for better parsing
s = pd.DataFrame(values, columns=[key])
return s
def get_combined_disclosures(self):
"""
Returns a dataframe of useful disclosure attributes
"""
df = pd.concat(
[
self.company_disclosures,
self.disclosure_tables_df,
self.disclosure_backgrounds,
self.disclosure_subjects,
],
axis=1,
ignore_index=False,
)
if (len(self.files) == 0) or self.clobber:
df.to_csv(self.fp)
if self.verbose:
print("Saved: {}".format(self.fp))
return df
def filter_disclosures(self, indicator="close", operation="max"):
"""
        Get disclosures coincident with an extremum in percent change
"""
# remove NaN
df = self.disclosures_combined.copy()
df.dropna(subset=["Announce Date and Time"], inplace=True)
disclosure_dates = df["Announce Date and Time"].apply(
lambda x: x.date()
)
if self.stock_data is None:
_ = self.get_stock_data()
df2 = self.stock_data[indicator].pct_change()
idx2 = df2.index.isin(disclosure_dates)
if operation == "max":
date = disclosure_dates.iloc[np.argmax(idx2)]
elif operation == "min":
date = disclosure_dates.iloc[np.argmin(idx2)]
else:
raise ValueError("operation=min,max")
return df[disclosure_dates == date]
def plot_disclosures(
self, disclosure_type=None, indicator="close", diff=True, percent=True
):
"""
Parameters
----------
disclosure_type : str
type of disclosure to highlight (default=all)
indicator : str
stock data to overplot (close or volume)
diff : bool
show previous trading day difference
        percent : bool
show percent change if diff=True
Returns a figure instance
"""
disclosure_type = (
self.disclosure_type
if disclosure_type is None
else disclosure_type
)
fig = pl.figure(figsize=(15, 10))
if self.stock_data is None:
data = self.get_stock_data()
else:
data = self.stock_data
colors = mpl.cm.rainbow(np.linspace(0, 1, len(self.disclosure_types)))
color_map = {n: colors[i] for i, n in enumerate(self.disclosure_types)}
df, label = data[indicator], indicator
if diff:
df = data[indicator].diff()
label = indicator + " diff"
if percent:
df = data[indicator].pct_change()
label = label + " (%)"
ax = df.plot(c="k", zorder=1, label=label)
if diff:
# add horizontal line at zero
ax.axhline(0, 0, 1, color="k", zorder=0, alpha=0.1)
# add vertical line for each disclosure release date
for key, row in self.company_disclosures.iterrows():
date = row["Announce Date and Time"]
template = _remove_amend(row["Template Name"])
if template.lower() == disclosure_type.lower():
ax.axvline(
date,
0,
1,
color=color_map[template],
zorder=0,
label=template,
)
elif disclosure_type == "all":
ax.axvline(
date,
0,
1,
color=color_map[template],
zorder=0,
label=template,
)
# show only unique legends
handles, labels = ax.get_legend_handles_labels()
by_label = dict(zip(labels, handles))
ax.legend(by_label.values(), by_label.keys())
ax.set_ylabel(label.upper())
ax.set_title(self.symbol)
return fig
def __call__(self):
# return parsed data after instantiation
return self.disclosures_combined
class DisclosuresInvestagrams:
"""
Disclosures scraped from investagrams
    Attributes
---------
disclosures_df : pd.DataFrame
parsed disclosures
"""
def __init__(self, symbol, from_date, to_date):
"""
symbol : str
phisix symbol
from_date : str
(%Y-%m-%d)
        to_date : str
(%Y-%m-%d)
"""
self.symbol = symbol
self.from_date = from_date
self.to_date = to_date
self.disclosures_json = self.get_disclosures_json()
self.disclosures_dict = self.get_disclosures_df()
self.earnings = self.disclosures_dict["E"]
self.dividends = self.disclosures_dict["D"]
def get_disclosures_json(self):
headers = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Referer": "https://www.investagrams.com/Stock/PSE:JFC",
"Origin": "https://www.investagrams.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"Content-Type": "text/plain; charset=utf-8",
}
from_date_epoch = date_to_epoch(self.from_date)
to_date_epoch = date_to_epoch(self.to_date)
params = (
("symbol", "PSE:{}".format(self.symbol)),
("from", from_date_epoch),
("to", to_date_epoch),
("resolution", "D"), # Setting D (daily) by default
)
response = requests.post(
"https://webapi.investagrams.com/InvestaApi/TradingViewChart/timescale_marks",
headers=headers,
params=params,
)
if hasattr(response, "text"):
assert (
len(response.text) > 10
), "Empty response from investagrams.com"
return response.json()
def disclosures_json_to_df(self):
disclosure_dfs = {}
for disc in ["D", "E"]:
filtered_examples = [
ex for ex in self.disclosures_json if ex["label"] == disc
]
additional_feats_df = pd.DataFrame(
[
dict(
[
tuple(item.split(":"))
for item in ex["tooltip"]
if ":" in item
]
)
for ex in filtered_examples
]
)
main_df = pd.DataFrame(filtered_examples)[
["id", "time", "color", "label"]
]
combined = pd.concat([main_df, additional_feats_df], axis=1)
combined["time"] = | pd.to_datetime(combined.time, unit="s") | pandas.to_datetime |
"""
Copyright 2021 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ---
import os
import random
import sys
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import rdkit.Chem as Chem
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
# --- JT-VAE
from jtnn import * # not cool, but this is how they do it ...
from jtnn.datautils import ToxPropDataset
# --- disable rdkit warnings
from rdkit import RDLogger
from torch.utils import data
from toxsquad.data import *
from toxsquad.losses import *
from toxsquad.modelling import *
from toxsquad.visualizations import Visualizations
# --- toxsquad
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
import os
import pickle
# ------------ PRE-PROCESSING ROUTINES ------------
from mol_tree import *
def save_object(obj, filename):
with open(filename, "wb") as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def open_object(filename):
with open(filename, "rb") as input:
reopened = pickle.load(input)
return reopened
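# Round-trip sketch for the two pickle helpers above (hypothetical filename):
#   save_object(vocab, "assay-123-vocab.pkl")
#   vocab = open_object("assay-123-vocab.pkl")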
def get_vocab(assay_dir, assay_id, toxdata):
filename = assay_dir + "/jtvae/" + str(assay_id) + "-vocab.pkl"
if os.path.isfile(filename):
print("Re-opening vocabulary file")
vocab = open_object(filename)
else:
print("Deriving vocabulary")
vocab = set()
for (
smiles
) in toxdata.smiles: # I guess here we should only use the training data??
mol = MolTree(smiles)
for c in mol.nodes:
vocab.add(c.smiles)
vocab = Vocab(list(vocab))
save_object(vocab, filename)
return vocab
# ------------ MODEL OPTIMIZATION ROUTINES ------------
def derive_inference_model(
toxdata,
vocab,
infer_dir,
model_params,
vis,
device,
model_name,
base_lr=0.003,
beta=0.005,
num_threads = 24,
weight_decay = 0.000
):
from jtnn.jtprop_vae import JTPropVAE
smiles = toxdata.smiles
props = toxdata.val
dataset = ToxPropDataset(smiles, props)
batch_size = 8
dataloader = data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_threads,
collate_fn=lambda x: x,
drop_last=True,
)
from jtnn.jtprop_vae import JTPropVAE
model = JTPropVAE(vocab, **model_params).to(device)
optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
# --- pre-train AE
total_step_count = 0
total_step_count = pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
infer_dir,
vis,
total_step_count,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# train (set a smaller initial LR, beta to 0.005)
optimizer = optim.Adam(model.parameters(), lr=0.0003,weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
print("[DEBUG] TRAINING")
total_step_count = train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
infer_dir,
vis,
total_step_count,
beta=0.005,
model_name=model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# --- fine tune AE
# optimizer = optim.Adam(model.parameters(), lr=0.0003)
# scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
# scheduler.step()
# total_step_count = train_jtvae(model, optimizer, scheduler, dataloader, device, infer_dir, vis, total_step_count, 0.005, model_name, MAX_EPOCH=36, PRINT_ITER=5)
def cross_validate_jtvae(
toxdata,
partitions,
xval_dir,
vocab,
model_params,
device,
model_name,
base_lr=0.003,
vis_host=None,
vis_port=8097,
assay_name="",
num_threads = 24,
weight_decay = 0.0000
):
"""
:todo ensure same training parameters are used for inference and cross-val models
"""
MAX_EPOCH = 36
PRINT_ITER = 5
run = 0
scores = []
for partition in partitions:
# I/O
save_dir = xval_dir + "/run-" + str(run)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
# vis
if vis_host is not None:
vis = Visualizations(
env_name="jtvae-xval-" + str(assay_name) + "-run-" + str(run), server=vis_host, port=vis_port
)
else:
vis = None
# data
smiles = toxdata.smiles.loc[partition["train"]]
props = toxdata.val.loc[partition["train"]]
dataset = ToxPropDataset(smiles, props)
batch_size = 8
dataloader = data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_threads,
collate_fn=lambda x: x,
drop_last=True,
)
# model
from jtnn.jtprop_vae import JTPropVAE
model = JTPropVAE(vocab, **model_params).to(device)
optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
# pretrain
print("[DEBUG] PRETRAINING")
total_step_count = pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
save_dir,
vis,
0,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# train (set a smaller initial LR, beta to 0.005)
optimizer = optim.Adam(model.parameters(), lr=0.0003,weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
print("[DEBUG] TRAINING")
total_step_count = train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
save_dir,
vis,
total_step_count,
beta=0.005,
model_name=model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# evaluate (only property prediction accuracy for now)
scores.append(
evaluate_predictions_model(
model,
toxdata.smiles.loc[partition["test"]],
toxdata.val.loc[partition["test"]],
vis,
)
)
# memory management
del model
del optimizer
torch.cuda.empty_cache()
run = run + 1
return scores
def pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
model_dir,
vis,
total_step_count,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
):
my_log = open(model_dir + "/loss-pre.txt", "w")
for epoch in range(MAX_EPOCH):
print("pre epoch: " + str(epoch))
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
for it, batch in enumerate(dataloader):
for mol_tree, _ in batch:
for node in mol_tree.nodes:
if node.label not in node.cands:
node.cands.append(node.label)
node.cand_mols.append(node.label_mol)
model.zero_grad()
torch.cuda.empty_cache()
loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta=0)
loss.backward()
optimizer.step()
word_acc += wacc
topo_acc += tacc
assm_acc += sacc
steo_acc += dacc
prop_acc += pacc
if (it + 1) % PRINT_ITER == 0:
word_acc = word_acc / PRINT_ITER * 100
topo_acc = topo_acc / PRINT_ITER * 100
assm_acc = assm_acc / PRINT_ITER * 100
steo_acc = steo_acc / PRINT_ITER * 100
prop_acc = prop_acc / PRINT_ITER
if vis is not None:
vis.plot_loss(word_acc, total_step_count, 1, model_name, "word-acc")
vis.plot_loss(prop_acc, total_step_count, 1, model_name, "mse")
print(
"Epoch: %d, Step: %d, KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f"
% (
epoch,
it + 1,
kl_div,
word_acc,
topo_acc,
assm_acc,
steo_acc,
prop_acc,
),
file=my_log,
flush=True,
)
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
del loss
del kl_div
total_step_count = total_step_count + 1
torch.cuda.empty_cache()
scheduler.step()
print("learning rate: %.6f" % scheduler.get_lr()[0])
torch.save(
model.cpu().state_dict(), model_dir + "/model-pre.iter-" + str(epoch)
)
torch.cuda.empty_cache()
model = model.to(device)
my_log.close()
return total_step_count
def train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
model_dir,
vis,
total_step_count,
beta,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
):
my_log = open(model_dir + "/loss-ref.txt", "w")
for epoch in range(MAX_EPOCH):
print("epoch: " + str(epoch))
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
for it, batch in enumerate(dataloader):
for mol_tree, _ in batch:
for node in mol_tree.nodes:
if node.label not in node.cands:
node.cands.append(node.label)
node.cand_mols.append(node.label_mol)
model.zero_grad()
torch.cuda.empty_cache()
loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta)
loss.backward()
optimizer.step()
word_acc += wacc
topo_acc += tacc
assm_acc += sacc
steo_acc += dacc
prop_acc += pacc
if (it + 1) % PRINT_ITER == 0:
word_acc = word_acc / PRINT_ITER * 100
topo_acc = topo_acc / PRINT_ITER * 100
assm_acc = assm_acc / PRINT_ITER * 100
steo_acc = steo_acc / PRINT_ITER * 100
prop_acc /= PRINT_ITER
if vis is not None:
vis.plot_loss(word_acc, total_step_count, 1, model_name, "word-acc")
vis.plot_loss(prop_acc, total_step_count, 1, model_name, "mse")
print(
"Epoch: %d, Step: %d, KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f"
% (
epoch,
it + 1,
kl_div,
word_acc,
topo_acc,
assm_acc,
steo_acc,
prop_acc,
),
file=my_log,
flush=True,
)
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
# if (it + 1) % 1500 == 0: # Fast annealing
# # does this make sense? With the smaller datasets
# # we don't get to 1500? Why is this happening?
# # I don't quite trust it
# # But here, since we call model.cpu()
# # we need to move the model to the device again
# # else we ran onto that weird issue!
# scheduler.step()
# print("learning rate: %.6f" % scheduler.get_lr()[0])
# #torch.save(
# # model.cpu().state_dict(),
# # model_dir + "/model-ref.iter-%d-%d" % (epoch, it + 1),
# #)
# model.to(device)
del loss
del kl_div
total_step_count = total_step_count + 1
scheduler.step()
print("learning rate: %.6f" % scheduler.get_lr()[0])
torch.save(
model.cpu().state_dict(), model_dir + "/model-ref.iter-" + str(epoch)
) # is this the expensive part?
model = model.to(device)
my_log.close()
return total_step_count
# ------------ MODEL EVALUATION ROUTINES ------------
def evaluate_predictions_model(model, smiles, props, vis):
"""
Return evaluation objects for JT-VAE model.
    This function will return a list of [mse, corr] for the smiles passed in,
and also return a 2-col matrix for plotting predicted vs. actual.
vis object allows us to use Visdom to directly update
a live performance plot view.
:param model: JT-VAE model
:param smiles: Pandas series with SMILES as entries
We usually pass toxdata.smiles
:param props: Pandas series with molecular activity or property to predict
:param vis: Visualization object from toxsquad.visualizations
:returns: Scores, coords
- Scores is a list of mean squared error and correlation coefficient
(for entire smiles batch). This is of length 2.
- coords are x, y coordinates for the "performance plot"
(where x=actual and y=predicted).
"""
predictions = dict()
n_molecules = len(smiles)
coords = np.zeros((n_molecules, 2))
# k = 0;
model = model.eval()
for k, idx in enumerate(smiles.index):
print_status(k, n_molecules)
sml = smiles.loc[idx]
prop = props.loc[idx]
# model.predict(sml) returns a torch tensor
# on which we need to call .item()
# to get the actual floating point value out.
predictions[idx] = model.predict(sml).item()
coords[k, 0] = prop.item()
coords[k, 1] = predictions[idx]
# k = k + 1;
model = model.train()
mse = np.mean((coords[:, 1] - coords[:, 0]) ** 2)
corr = np.corrcoef(coords[:, 1], coords[:, 0])[0, 1]
print("MSE: " + str(mse))
print("Corr: " + str(corr))
scores = []
scores.append(mse)
scores.append(corr)
# TODO do reconstruction test
if vis is not None:
vis.plot_scatter_gt_predictions(
coords, f"{mse:.2f}" + "-r: " + f"{corr:.2f}", ""
)
return scores, coords
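# Usage sketch (assumes a trained JTPropVAE `model` and a `toxdata` frame as used elsewhere here):
#   scores, coords = evaluate_predictions_model(model, toxdata.smiles, toxdata.val, vis=None)
#   mse, corr = scores  # coords[:, 0] holds the actual values, coords[:, 1] the predictions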
# ------------ LATENT SPACE ROUTINES ------------
from numpy.random import choice
from rdkit import DataStructs
from rdkit.Chem import AllChem
def get_neighbor_along_direction_tree(sample_latent, direction, step_size):
"""
Direction should be normalized
Direction is in tree space
"""
tree_vec, mol_vec = torch.chunk(sample_latent, 2, dim=1)
new_tree_vec = tree_vec + (direction * step_size)
new_sample = torch.cat([new_tree_vec, mol_vec], dim=1)
return new_sample
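# Note: the latent vector is the concatenation [tree_vec | mol_vec]; this helper shifts only
# the tree half, while get_neighbor_along_direction_graph below shifts only the mol half.
# Hypothetical sketch: with a 56-dimensional latent, sample_latent is (1, 56) and each half is (1, 28).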
def get_neighbor_along_direction_graph(sample_latent, direction, step_size):
"""
Direction should be normalized
"""
tree_vec, mol_vec = torch.chunk(sample_latent, 2, dim=1)
# update graph
new_mol_vec = mol_vec + (
direction * step_size
) # maybe the step size will have to be different?
new_sample = torch.cat([tree_vec, new_mol_vec], dim=1)
return new_sample
def get_neighbors_along_directions_tree_then_graph(
model,
smiles,
directions,
scale_factors,
direction_graph,
scale_factor_graph,
n_neighbors=10,
val_to_beat=-2,
max_cosine_distance=1.6,
direction_graph_plus=None,
convert_to_pac50=False,
):
sample_latent = model.embed(smiles)
n_directions = len(directions)
new_samples = []
int_step_sizes = np.arange(-n_neighbors, n_neighbors + 1, 1)
idx = int_step_sizes == 0
int_step_sizes = np.delete(int_step_sizes, np.where(idx)[0][0])
actual_n_neighbors = len(int_step_sizes)
    # dynamic range (this adds a lot of additional samples ... just takes longer)
step_sizes_graph = np.arange(-n_neighbors, n_neighbors + 1, 1)
step_sizes_graph = step_sizes_graph * scale_factor_graph
# fixed range (original implementation)
step_sizes_graph_original = np.arange(-1, 2, 1)
step_sizes_graph_original = (
step_sizes_graph_original * 0.5
) # so here the step size is also fixed!
step_sizes_graph = np.concatenate(
(step_sizes_graph, step_sizes_graph_original), axis=None
)
actual_n_neighbors_graph = len(step_sizes_graph)
    # this is pretty quick, as it's just arithmetic operations in latent space
# todo: since cosine similarity in latent space correlates to an extent with
# chemical similarity, we could further reduce the number of evaluations based on that
cos = nn.CosineSimilarity(dim=1)
for k in range(n_directions): # iterate over axes
step_sizes = int_step_sizes * scale_factors[k]
for i in range(actual_n_neighbors): # iterate over steps along axis
sample = get_neighbor_along_direction_tree(
sample_latent, directions[k], step_sizes[i]
) # tree sample
for j in range(actual_n_neighbors_graph): # iterate along graph axis
graph_sample = get_neighbor_along_direction_graph(
sample, direction_graph, step_sizes_graph[j]
)
# check cosine
cdistance = 1 - cos(sample_latent, graph_sample)
if cdistance.item() < max_cosine_distance:
new_samples.append(graph_sample)
# additional direction
if direction_graph_plus is not None:
graph_sample = get_neighbor_along_direction_graph(
sample, direction_graph_plus, step_sizes_graph[j]
)
# check cosine
cdistance = 1 - cos(sample_latent, graph_sample)
if cdistance.item() < max_cosine_distance:
new_samples.append(graph_sample)
# predict activity and decode samples (probably should be another function, also because this happens ALL the time)
new_smiles, new_activities, new_samples = predict_and_decode_strict(
model, new_samples, val_to_beat, convert_to_pac50
)
return (
new_samples,
new_smiles,
new_activities,
sample_latent.squeeze().cpu().detach().numpy(),
)
# I guess the min val should be informed also relative to the MSE of the model
#
def predict_and_decode_strict(model, new_samples, min_val, convert_to_pac50=False):
n_samples = len(new_samples)
new_smiles = []
new_activities = []
my_bar = None
filtered_samples = []
try:
import streamlit as st
st.write("Decoding progress")
my_bar = st.progress(0)
except ImportError:
pass
for i in range(n_samples):
if my_bar is not None:
my_bar.progress((i + 1) / n_samples)
print_status(i, n_samples)
prediction = (
model.propNN(new_samples[i]).squeeze().cpu().detach().numpy()
) # compute the activity predictions
if convert_to_pac50:
prediction = (prediction - 6) * -1
# HIGHER IS BETTER
prediction_condition = prediction > min_val
if prediction_condition:
new_activities.append(prediction)
tree_vec, mol_vec = torch.chunk(new_samples[i], 2, dim=1)
more_smiles = model.decode(tree_vec, mol_vec, prob_decode=False)
new_smiles.append(more_smiles)
new_samples[i] = new_samples[i].squeeze().cpu().detach().numpy()
filtered_samples.append(new_samples[i])
return new_smiles, new_activities, filtered_samples
def predict_and_decode(model, new_samples, show_st=False):
n_samples = len(new_samples)
new_smiles = []
new_activities = []
my_bar = None
if show_st:
try:
import streamlit as st
st.write("Decoding progress")
my_bar = st.progress(0)
except ImportError:
pass
for i in range(n_samples):
if my_bar is not None:
my_bar.progress((i + 1) / n_samples)
print_status(i, n_samples)
prediction = (
model.propNN(new_samples[i]).squeeze().cpu().detach().numpy()
) # compute the activity predictions
new_activities.append(prediction)
tree_vec, mol_vec = torch.chunk(new_samples[i], 2, dim=1)
more_smiles = model.decode(tree_vec, mol_vec, prob_decode=False)
new_smiles.append(more_smiles)
new_samples[i] = new_samples[i].squeeze().cpu().detach().numpy()
return new_smiles, new_activities
def sample_gaussian(mean, sigma, n_samples):
center = mean
covariance = sigma
m = torch.distributions.MultivariateNormal(center, covariance)
samples = []
for i in range(n_samples):
samples.append(m.sample())
samples = torch.stack(samples)
return samples
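# Sketch (hypothetical dimensions): for a 56-dimensional latent space,
#   samples = sample_gaussian(torch.zeros(56), torch.eye(56), n_samples=100)
# returns a (100, 56) tensor drawn from the corresponding multivariate normal.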
def sample_gaussian_and_predict(model, n_samples, mean, sigma):
dim = int(model.latent_size)
center = mean
covariance = sigma
m = torch.distributions.MultivariateNormal(center, covariance)
samples = []
for i in range(n_samples):
samples.append(m.sample())
samples = torch.stack(samples)
cur_vec = create_var(samples.data, False)
predictions = model.propNN(cur_vec).squeeze()
vectors = cur_vec.cpu().detach().numpy()
predictions = predictions.cpu().detach().numpy()
return vectors, predictions
def get_embeddings(model, toxdata):
k = 0
n_molecules = len(toxdata)
vectors = {}
for idx in toxdata.smiles.index:
print_status(k, n_molecules)
sml = toxdata.smiles.loc[idx]
vectors[idx] = model.embed(sml).cpu().detach().numpy().ravel()
k = k + 1
return vectors
from rdkit import DataStructs
from rdkit.Chem import AllChem
from sklearn.metrics.pairwise import cosine_similarity
def sample_latent_space(model, latent, n_samples=2000, decode=False):
mu = torch.from_numpy(np.mean(latent).values).float()
sigma = torch.from_numpy(np.cov(latent.values.transpose())).float()
return sample_latent_space_pass_normal(model, mu, sigma, n_samples, decode)
def sample_latent_space_pass_normal(model, mu, sigma, n_samples=2000, decode=False):
samples, samples_predictions = model.sample_gaussian_and_predict(
n_samples, mu, sigma
) # this is fast
samples = samples.astype("float64")
samples_predictions = samples_predictions.astype("float64")
# dim = int(model_params["latent_size"] / 2)
dim = int(model.latent_size / 2)
tree_vec = create_var(torch.from_numpy(samples[:, 0:dim]).float())
mol_vec = create_var(torch.from_numpy(samples[:, dim : dim * 2]).float())
samples_decoded = []
if decode:
for i in range(n_samples):
print_status(i, n_samples)
samples_decoded.append(
model.decode(
tree_vec[i, :].reshape(1, -1),
mol_vec[i, :].reshape(1, -1),
prob_decode=False,
)
) # this is slow
samples_decoded_df = | pd.DataFrame(data=samples_decoded) | pandas.DataFrame |
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#import stationary_block_bootstrap as sbb
import pandas as pd
import numpy as np
import scipy.stats
import numpy
import time
import random
#import state_variables
import os
import scipy.stats
import sklearn.feature_selection
import matplotlib.gridspec as gridspec
import copy
from argotools.config import *
from argotools.forecastlib.handlers import *
from argotools.forecastlib.functions import *
import argotools.forecastlib.stationary_block_bootstrap as sbb
from argotools.dataFormatter import *
import seaborn as sns
import matplotlib.ticker as mticker
import math
from matplotlib.ticker import MaxNLocator,IndexFormatter, FormatStrFormatter
class OutputVis:
# Variables : top_n = 3, ranking_metric = 'rmse', ranking_season ='ALL_PERIOD', preds (vector/PD containing all predictions), metrics (matrix/PD containing all metrics),
# Load predictions and csvs from file,
# get name of models, number of models, name of metrics, table variable names (season1, season2... allPeriod).
    # Get RANKING METRIC for all models in the file. Check if there's more than one first.
# FUNC STATISTICS BETWEEN THE MODELS : MEAN, VARIANCE, BEST MODEL, WORST MODEL
# figure 1 : Time-series, error and percent error
# figure 2: metric / plot
def __init__(self, folder_dir=None, ids=None, overview_folder='_overview'):
# Loading tables and files
if folder_dir is None:
            print('WARNING! No main folder directory specified. Add it as an attribute or \
            specify it on every function call that requires it.')
self.folder_main = folder_dir
self.ids = ids
self.overview_folder = overview_folder
print('Visualizer initialized')
# imported VARS
def plot_SEC(self, series_filepath=None, coeff_filepath=None, target_name='ILI', models=None, color_dict=None, start_period=None, end_period=None, alpha_dict=None, output_filename=None, ext='png', mode='save', n_coeff=20, cmap_color='RdBu_r', error_type='Error', vmin=-1, vmax=1, font_path=None):
if font_path:
from matplotlib import font_manager
prop = font_manager.FontProperties(fname=font_path)
if color_dict is None:
color_dict = dict(zip(models, [tuple(np.random.random(3)) for mod in models]))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1 for mod in models]))
series_df = pd.read_csv(series_filepath, index_col=0)
coeff_df = pd.read_csv(coeff_filepath, index_col=0)
if start_period is None:
start_period = series_df.index[0]
if end_period is None:
end_period = series_df.index[-1]
series_df = series_df[start_period:end_period]
coeff_df = coeff_df[start_period:end_period]
target = series_df[target_name].values
series = {}
errors = {}
for mod in models:
series[mod] = series_df[mod].values
errors[mod] = np.abs(target - series[mod])
indices = list(series_df[target_name].index.values)
#plotting target
f, axarr = plt.subplots(3,2, gridspec_kw = {'height_ratios':[2,1,3], 'width_ratios':[16,1]})
axarr[0,0].fill_between(x=list(range(len(indices))),y1=target, facecolor='gray', alpha=0.5, label=target_name)
#plotting series
for mod in models:
axarr[0,0].plot(series[mod], label=mod, color=color_dict[mod], alpha=alpha_dict[mod])
axarr[1,0].plot(errors[mod], color=color_dict[mod], alpha=alpha_dict[mod])
if n_coeff is None:
n_coeff = coeff_df.shape[1]
means = coeff_df.mean(axis=0)
coeff_names = list(coeff_df)
ordered_names = [ name for v, name in sorted(zip(means, coeff_names), key=lambda x: x[0], reverse=True)]
coeff_df = coeff_df[ordered_names[:n_coeff]]
sns.heatmap(coeff_df.T, vmin=vmin, vmax=vmax, cmap=cmap_color, center=None, \
robust=False, annot=None, fmt='.2g', annot_kws=None, linewidths=0,\
linecolor='white', cbar=True, cbar_kws=None, cbar_ax=axarr[2,1], square=False,\
xticklabels='auto', yticklabels=True, mask=None, ax=axarr[2,0])
plt.gcf().set_size_inches([10, int(n_coeff/2)])
plt.sca(axarr[0,0])
plt.legend(frameon=False, ncol=len(models))
plt.xlim([0, len(indices)])
plt.ylim(bottom=0)
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
plt.gca().set_xticklabels([])
plt.grid(linestyle = 'dotted', linewidth = .6)
plt.sca(axarr[1,0])
plt.xlim([0, len(indices)])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
plt.gca().set_xticklabels([])
plt.grid(linestyle = 'dotted', linewidth = .6)
plt.sca(axarr[0,1])
plt.axis('off')
plt.sca(axarr[1,1])
plt.axis('off')
plt.sca(axarr[2,0])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
plt.gca().set_yticklabels(ordered_names[:n_coeff], fontproperties=prop)
# STYLE
axarr[0,0].spines['right'].set_visible(False)
axarr[0,0].spines['top'].set_visible(False)
axarr[1,0].spines['right'].set_visible(False)
axarr[1,0].spines['top'].set_visible(False)
axarr[0,0].set_ylabel(target_name)
axarr[1,0].set_ylabel(error_type)
plt.subplots_adjust(left=.2, bottom=.1, right=.95, top=.9, wspace=.05, hspace=.20)
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_coefficients'.format(model)
plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, id_, output_filename, ext), format=ext)
else:
plt.savefig(output_filename+'.{0}'.format(ext), format=ext)
plt.close()
def plot_coefficients(self, id_=None, model=None, coefficients_filepath=None, cmap_color='RdBu_r',\
n_coeff=None, filename='_coefficients.csv', output_filename=None, ext='png', mode='show'):
if coefficients_filepath:
coefficients = pd.read_csv(coefficients_filepath, index_col=0)
else:
coefficients = pd.read_csv('{0}/{1}/{2}'.format(self.folder_main, id_, model), index_col=0)
coefficients.fillna(0)
if n_coeff is None:
n_coeff = coefficients.shape[1]
means = coefficients.mean(axis=0)
coeff_names = list(coefficients)
ordered_names = [ name for v, name in sorted(zip(means, coeff_names), key=lambda x: x[0], reverse=True)]
coefficients = coefficients[ordered_names[:n_coeff]]
sns.heatmap(coefficients.T, vmin=None, vmax=None, cmap=cmap_color, center=None, \
robust=False, annot=None, fmt='.2g', annot_kws=None, linewidths=0,\
linecolor='white', cbar=True, cbar_kws=None, cbar_ax=None, square=False,\
xticklabels='auto', yticklabels=True, mask=None, ax=None)
plt.gcf().set_size_inches([10, int(n_coeff/3)])
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_coefficients'.format(model)
                plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, id_, output_filename, ext), format=ext)
else:
plt.savefig(output_filename+'.{0}'.format(ext), format=ext)
plt.close()
def inter_group_lollipop_comparison(ids_dict, path_dict, metric, period, models, benchmark, color_dict=None, alpha_dict=None, metric_filename='metrics.csv', bar_separation_multiplier=1.5, mode='show', output_filename='LollipopTest', plot_domain=None, ext='png'):
"""
Plots the ratio of the metric score for each of the models against a benchmark in a lollipop plot to compare between experiments.
Parameters
__________
ids_dict: dict
Dictionary containing the list of ids for each experiment
path_dict: dict
Dictionary containing the path to the experiment folders (must coincide with the keys of ids_dict)
metric: str
String containing the name of the metric to look for in the predictions file
period: str
Column name containing the values to plot
models: List, optional (default None)
String list containing the names of the models to plot
benchmark: str
The name within "models" which will serve as the benchmark
color_dict : dict
Dictionary containing specific colors for the models to plot
metric_filename : str, optional (default metrics.csv)
mode : str, optional (default is 'save')
            If 'save', the function saves the plot in the id_-specific folder.
            If 'show', the function plots and uses plt.show to pop up the plot in real time.
alpha_dict : dict, optional (default is None)
dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1
output_filename : str, optional (default is None)
If set to None, output_filename is set metricname_barplot
ext : str, optional (default is png)
            Extension format used to save the barplot.
plot_domain : list, optional (default is [0,1])
list of two integers that sets the limits in the plot (plt.xlim)
bar_separation_multiplier : float, optional (default is 1)
            Parameter that functions as a multiplier for the separation between bars in the plot.
            If set to 1, bars are plotted at locations 1, 2, 3...; if set to 2, at 2, 4, 6, etc.
"""
fig, axarr = plt.subplots(len(ids_dict.keys()),1)
axes = axarr.ravel()
if color_dict is None:
color_dict = dict(zip(models, ['b']*len(models)))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1]*len(models)))
for i, (experiment, folder_main) in enumerate(path_dict.items()):
plt.sca(axes[i])
ids = ids_dict[experiment]
values_dict = dict(zip(models, [[] for mod in models]))
min_val = float('inf')
max_val = float('-inf')
indices = []
overview_path = '{0}/{1}'.format(folder_main, '_overview')
for i, id_ in enumerate(ids):
indices.append(i*bar_separation_multiplier)
id_path = '{0}/{1}'.format(folder_main, id_)
df = pd.read_csv('{0}/{1}'.format(id_path, metric_filename))
df = df[df['METRIC']==metric]
for j, mod in enumerate(models):
ratio = copy.copy(df[df['MODEL']==mod][period].values[0]/df[df['MODEL']==benchmark][period].values[0])
if metric in ['ERROR', 'RMSE', 'NRMSE', 'MAPE']:
ratio=(1/ratio)
values_dict[mod].append(ratio)
if ratio < min_val:
min_val = ratio
if ratio > max_val:
max_val = ratio
bar_width = 1/len(models)
indices = np.array(indices)
for i, mod in enumerate(models):
heights = values_dict[mod]
bar_positions = indices + bar_width*i
(markers, stemlines, baseline) = plt.stem(bar_positions, heights, linefmt='--')
plt.setp(markers, marker='o', markersize=7, color=color_dict[mod], alpha=alpha_dict[mod], label=mod)
plt.setp(stemlines, color=color_dict[mod], linewidth=1)
plt.setp(baseline, visible=False)
# Black line
plt.plot([0,bar_positions[-1]], [1,1],'--',color='.6', alpha=.6)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
if experiment == 'State':
ids = [id_[-3:] for id_ in ids]
plt.xticks(indices+bar_width*((len(models)-1)/2), ids)
plt.ylim([min_val*.95, max_val*1.05])
plt.xlim([-.3, bar_positions[-1]+.3])
if i == 0:
axes[i].legend(frameon=False, ncol=len(models))
plt.title('{0} barplot'.format(metric))
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_barplot'.format(metric)
plt.gcf().set_size_inches([6,15])
plt.savefig('{0}/{1}.{2}'.format(overview_path, output_filename, ext), format=ext)
plt.close()
def group_lollipop_ratio(ids, metric, period, models, benchmark, folder_main = None, color_dict=None, alpha_dict=None, metric_filename='metrics.csv', bar_separation_multiplier=1.5, mode='show', output_filename='LollipopTest', plot_domain=None, ext='png'):
"""
Plots the ratio of the metric score for each of the models against a benchmark in a lollipop plot.
Parameters
__________
id_: str
Identifier for the region to look for
metric: str
String containing the name of the metric to look for in the predictions file
period: str
Column name containing the values to plot
models: List, optional (default None)
String list containing the names of the models to plot
benchmark: str
The name within "models" which will serve as the benchmark
color_dict : dict
Dictionary containing specific colors for the models to plot
metric_filename : str, optional (default metrics.csv)
mode : str, optional (default is 'save')
            If 'save', the function saves the plot in the id_-specific folder.
            If 'show', the function plots and uses plt.show to pop up the plot in real time.
alpha_dict : dict, optional (default is None)
dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1
output_filename : str, optional (default is None)
If set to None, output_filename is set metricname_barplot
ext : str, optional (default is png)
            Extension format used to save the barplot.
plot_domain : list, optional (default is [0,1])
list of two integers that sets the limits in the plot (plt.xlim)
bar_separation_multiplier : float, optional (default is 1)
            Parameter that functions as a multiplier for the separation between bars in the plot.
            If set to 1, bars are plotted at locations 1, 2, 3...; if set to 2, at 2, 4, 6, etc.
"""
if color_dict is None:
color_dict = dict(zip(models, ['b']*len(models)))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1]*len(models)))
if folder_main is None:
folder_main = self.folder_main
values_dict = dict(zip(models, [[] for mod in models]))
min_val = float('inf')
max_val = float('-inf')
indices = []
overview_path = '{0}/{1}'.format(folder_main, '_overview')
for i, id_ in enumerate(ids):
indices.append(i*bar_separation_multiplier)
id_path = '{0}/{1}'.format(folder_main, id_)
df = pd.read_csv('{0}/{1}'.format(id_path, metric_filename))
df = df[df['METRIC']==metric]
for j, mod in enumerate(models):
ratio = copy.copy(df[df['MODEL']==mod][period].values[0]/df[df['MODEL']==benchmark][period].values[0])
if metric in ['ERROR', 'RMSE', 'NRMSE', 'MAPE']:
ratio=(1/ratio)
values_dict[mod].append(ratio)
if ratio < min_val:
min_val = ratio
if ratio > max_val:
max_val = ratio
bar_width = 1/len(models)
indices = np.array(indices)
for i, mod in enumerate(models):
heights = values_dict[mod]
bar_positions = indices + bar_width*i
(markers, stemlines, baseline) = plt.stem(bar_positions, heights, linefmt='--')
plt.setp(markers, marker='o', markersize=7, color=color_dict[mod], alpha=alpha_dict[mod], label=mod)
plt.setp(stemlines, color=color_dict[mod], linewidth=1)
plt.setp(baseline, visible=False)
# Black line
plt.plot([0,bar_positions[-1]], [1,1],'--',color='.6', alpha=.6)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.title('{0} barplot'.format(metric))
plt.xticks(indices+bar_width*((len(models)-1)/2), ids)
plt.ylim([min_val*.95, max_val*1.05])
plt.xlim([-.3, bar_positions[-1]+.3])
plt.legend(frameon=False, ncol=len(models))
if plot_domain:
plt.xlim(plot_domain)
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_barplot'.format(metric)
plt.gcf().set_size_inches([6,15])
plt.savefig('{0}/{1}.{2}'.format(overview_path, output_filename, ext), format=ext)
plt.close()
def inter_season_analysis(self,ids, main_folders, periods, series_names, metric = 'RMSE', filename='metrics_condensed.csv', output_filename='season_analysis', color_dict=None, alpha_dict=None, mode='save', ext='png'):
'''
Performs seasonal analysis of data based on periods decided from the user.
The top part of the plot shows violin plots (https://seaborn.pydata.org/generated/seaborn.violinplot.html)
        and displays the models' metric scores in a boxplot/distribution scheme.
        The bottom part shows a heatmap representing the distribution of rankings across all periods. E.g., if each timeseries
        case contains 4 periods and there are 4 cases, the total number of periods is 4*4 = 16. Each period has a metric for each model.
        inter_season_analysis compares this metric within each period and ranks the models from first to nth place; each place generates a +1
        count in the heatmap in the column representing the model and the row representing the rank.
__________
ids : dict
The dict of lists containing the identifiers for the regions.
main_folders : dict
The path to the experiments. Dictionary keys have to be consistent with the ids keys
periods : list
list containing the periods (should be available within the metrics table)
filename : str
String containing the filename to read the series from (using pandas).
start_period : str,
timeseries Pandas dataframe starting index.
end_period : str
timeseries ending index in the pandas dataframe.
        n_col : int, optional (default is one)
            Defines the number of columns of the plotting array; the function organizes
            the plots into n_col columns.
series_names : list, optional (default is None)
Names of the timeseries to plot. If set to None, then model plots all of them.
output_filename : str, optional (default is series)
Name of the graphics file containing the plots.
color_dict : dict
Dictionary containing specific colors for the models to plot.
mode : str, optional (default is 'save')
            If 'save', the function saves the plot in the id_-specific folder.
            If 'show', the function plots and uses plt.show to pop up the plot in real time.
alpha_dict : dict, optional (default is None)
dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1.
ext : str, optional (default is png)
            Extension format used to save the graphics file.
'''
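        # Worked example of the ranking heatmap described above (hypothetical numbers):
        # with 4 cases x 4 periods = 16 ranked periods and models A and B, if A beats B
        # on the chosen metric in 10 periods, the heatmap gains a count of 10 at
        # (rank 1, model A), 6 at (rank 1, model B), and the complementary counts in the rank-2 row.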
default_colors = ['royalblue', 'darkorange', 'forestgreen', 'firebrick']
if color_dict is None:
color_dict = dict(zip(series_names, default_colors[0:len(series_names)]))
score_periods = {}
ranks = {}
for title, ids_ in ids.items():
metrics_df = | pd.read_csv(main_folders[title] + '/_overview/'+ filename) | pandas.read_csv |
"""
Script to generate new train test splits
"""
import pandas as pd
import numpy as np
import random
import argparse
total_frames = 1950
def get_df_all(data_path):
names = ['path', 'x1', 'y1', 'x2', 'y2', 'class_name']
df_annotations_train = pd.read_csv('{}/annotation_train.csv'.format(data_path), names=names)
df_annotations_val = pd.read_csv('{}/annotation_val.csv'.format(data_path), names=names)
df_annotations_test = pd.read_csv('{}/annotation_test.csv'.format(data_path), names=names)
df_all = | pd.concat([df_annotations_train, df_annotations_val, df_annotations_test]) | pandas.concat |
import os
from datetime import datetime, timedelta, timezone
import logging
from typing import List
from itertools import repeat
import tarfile
import pandas as pd
import pyarrow.parquet as pq
import pyarrow.dataset as ds # put this later due to some numpy dependency
from suzieq.shared.utils import humanize_timestamp
from suzieq.shared.schema import SchemaForTable
from suzieq.db.parquet.migratedb import get_migrate_fn
class SqCoalesceState:
'''Class that coalesces parquet files'''
def __init__(self, logger: str, period: timedelta):
self.current_df = pd.DataFrame()
self.logger = logger if logger else logging.getLogger()
self.keys = None # filled as we tackle each table
self.schema = None # filled as we tackle each table
self.dbeng = None # filled as we tackle each table
self.period = period
        self.prefix = 'sqc-h-' # sqc == suzieq coalescer, h: hourly coalesce
        self.ign_pfx = ['.', '_', 'sqc-'] # Prefixes to ignore for coalescing
self.wrfile_count = 0
self.wrrec_count = 0
self.poller_periods = set()
self.block_start = self.block_end = 0
def pq_file_name(self, *args): # pylint: disable=unused-argument
"""Callback to create a filename that uses the timestamp of start
of hour. This makes it easy for us to lookup data when we need to.
:returns: parquet filename to write
:rtype: str
"""
# Using timestamp rather than date/time string to simplify reads
return f'{self.prefix}{self.block_start}-{self.block_end}.parquet'
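    # Example of a generated name (hypothetical epoch seconds): with block_start=1652320800
    # and block_end=1652324400 this callback returns "sqc-h-1652320800-1652324400.parquet".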
def archive_coalesced_files(filelist: List[str], outfolder: str,
state: SqCoalesceState, dodel: bool) -> None:
"""Tars and removes the already coalesced files
    :param filelist: List[str], list of files to be tarred and archived
:param outfolder: str, folder name where the archive is to be stored
:param state: SqCoalesceState, state of coalesceer
:param dodel: bool, True if the coalesced files must be deleted
:returns: Nothing
"""
if filelist and outfolder:
with tarfile.open(f'{outfolder}/_archive-{state.prefix}-'
f'{state.block_start}-{state.block_end}.tar.bz2',
'w:bz2') as f:
for file in filelist:
f.add(file)
if dodel:
# pylint: disable=expression-not-assigned
[os.remove(x) for x in filelist]
# pylint: disable=unused-argument
# Doing pylint override because I'm nervous about touching this fn
def write_files(table: str, filelist: List[str], in_basedir: str,
outfolder: str, partition_cols: List[str],
state: SqCoalesceState, block_start, block_end) -> None:
"""Write the data from the list of files out as a single coalesced block
We're fixing the compression in this function
:param table: str, Name of the table for which we're writing the files
:param filelist: List[str], list of files to write the data to
:param in_basedir: str, base directory of the read files,
to get partition date
:param outfolder: str, the outgoing folder to write the data to
:param partition_cols: List[str], partition columns
:param state: SqCoalesceState, coalescer state, for constructing filename
    :param block_start: datetime, starting time window of this coalescing block
    :param block_end: datetime, ending time window of this coalescing block
:returns: Nothing
"""
if not filelist and not state.schema.type == "record":
return
state.block_start = int(block_start.timestamp())
state.block_end = int(block_end.timestamp())
if filelist:
this_df = ds.dataset(source=filelist, partitioning='hive',
partition_base_dir=in_basedir) \
.to_table() \
.to_pandas()
state.wrrec_count += this_df.shape[0]
if not this_df.empty:
this_df = migrate_df(table, this_df, state.schema)
if state.schema.type == "record":
if not state.current_df.empty:
this_df = this_df.set_index(state.keys)
sett = set(this_df.index)
setc = set(state.current_df.index)
missing_set = setc.difference(sett)
if missing_set:
missing_df = state.current_df.loc[missing_set]
this_df = pd.concat([this_df.reset_index(),
missing_df.reset_index()])
else:
this_df = this_df.reset_index()
elif not state.current_df.empty:
this_df = state.current_df.reset_index()
else:
return
this_df.sqvers = state.schema.version # Updating the schema version
state.dbeng.write(state.table_name, "pandas", this_df, True,
state.schema.get_arrow_schema(),
state.pq_file_name)
if state.schema.type == "record" and filelist:
# Now replace the old dataframe with this new set for "record" types
# Non-record types should never have current_df non-empty
state.current_df = this_df.set_index(state.keys) \
.drop(columns=['index'], errors='ignore') \
.sort_values(by='timestamp') \
.query('~index.duplicated(keep="last")')
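# Note on the "record" branch above: state.current_df keeps one row per key (e.g. per
# namespace/hostname in a hypothetical table), sorted by timestamp with only the latest
# duplicate retained, so the next coalescing block starts from the last known state.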
def find_broken_files(parent_dir: str) -> List[str]:
"""Find any files in the parent_dir that pyarrow can't read
    :param parent_dir: str, the parent directory to investigate
:returns: list of broken files
:rtype: list of strings
"""
all_files = []
broken_files = []
ro, _ = os.path.split(parent_dir)
for root, _, files in os.walk(parent_dir):
if ('_archived' not in root and '.sq-coalescer.pid' not in files
and len(files) > 0):
path = root.replace(ro, '')
all_files.extend(list(map(lambda x: f"{path}/{x}", files)))
for file in all_files:
try:
pq.ParquetFile(f"{ro}/{file}")
except pq.lib.ArrowInvalid:
broken_files.append(file)
return broken_files
def move_broken_files(parent_dir: str, state: SqCoalesceState,
out_dir: str = '_broken', ) -> None:
""" move any files that cannot be read by pyarrow in parent dir
to a safe directory to be investigated later
:param parent_dir: str, the parent directory to investigate
:param state: SqCoalesceState, needed for the logger
    :param out_dir: str, directory to put the broken files in
:returns: Nothing
:rtype: None
"""
broken_files = find_broken_files(parent_dir)
ro, _ = os.path.split(parent_dir)
for file in broken_files:
src = f"{ro}/{file}"
dst = f"{ro}/{out_dir}/{file}"
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
state.logger.debug(f"moving broken file {src} to {dst}")
os.replace(src, dst)
def get_file_timestamps(filelist: List[str]) -> pd.DataFrame:
"""Read the files and construct a dataframe of files and timestamp of
record in them.
:param filelist: list, of full path name files, typically from pyarrow's
dataset.files
:returns: dataframe of filename with the time it represents, sorted
:rtype: pandas.DataFrame
"""
if not filelist:
return pd.DataFrame(columns=['file', 'timestamp'])
    # We can't rely on the system stat time to find the times involved,
    # so read the data for each block and check. We tried using threading
    # and it didn't dramatically alter the results. Given that we might have
    # too many threads running with the poller and everything, we skipped
    # doing it.
fname_list = []
fts_list = []
for file in filelist:
try:
ts = pd.read_parquet(file, columns=['timestamp'])
fts_list.append(ts.timestamp.min())
fname_list.append(file)
except OSError:
# skip this file because it can't be read, is probably 0 bytes
logging.debug(f"not reading timestamp for {file}")
    # Construct the file dataframe, as it's simpler to deal with
if fname_list:
fdf = | pd.DataFrame({'file': fname_list, 'timestamp': fts_list}) | pandas.DataFrame |
import os
import torch
import numpy as np
import pandas as pd
import torchvision
from torchvision import datasets, models as torch_models, transforms
import datetime
import time
import sys
import copy
import warnings
from metric_test_eval import MetricEmbeddingEvaluator, LogitEvaluator
import logging
logger = logging.getLogger(__name__)
def run(args):
if args.supress_warnings:
warnings.simplefilter("ignore")
def adjust_path(p):
return os.path.join(args.data_root_dir, p)
args.label_encoder = adjust_path(args.label_encoder)
args.all_imgs_csv = adjust_path(args.all_imgs_csv)
args.val_imgs_csv = adjust_path(args.val_imgs_csv)
args.test_imgs_csv = adjust_path(args.test_imgs_csv)
args.results_dir = adjust_path(args.results_dir)
print(args)
from multihead_trainer import train
from multihead_trainer import torch_transform
# TODO: consolidate logid
def build_logid_string(args, add_timestamp=True):
param_str = "lr{}_dr{}_lrpatience{}_lrfactor{}_{}".format(
args.init_lr, args.dropout, args.lr_patience,
args.lr_factor, args.appearance_network)
if add_timestamp:
param_str += "_" + datetime.datetime.now().strftime("%Y%m%d%H%M")
return param_str
param_str = build_logid_string(args)
# Azure ML
from azureml.core.run import Run
run = Run.get_context()
# log arguments if it's not called by train_cv
if not hasattr(args, 'folds_csv_dir'):
for k, v in vars(args).items():
run.tag(k, str(v))
save_path = os.path.join(args.results_dir, param_str)
os.makedirs(save_path, exist_ok=True)
print("save_path", save_path)
logger.info(f"cuda.is_available={torch.cuda.is_available()}, n_gpu={torch.cuda.device_count()}")
# encode the classes
from sklearn.preprocessing import LabelEncoder
import pickle
if not os.path.exists(args.label_encoder):
logger.warning(f"Fitting a new label encoder at {args.label_encoder}")
all_imgs_df = pd.read_csv(args.all_imgs_csv)
label_encoder = LabelEncoder()
label_encoder.fit(all_imgs_df['label'])
pickle.dump(label_encoder, open(args.label_encoder, "wb"))
else:
logger.info(f"Loading label encoder: {args.label_encoder}")
with open(args.label_encoder, 'rb') as pickle_file:
label_encoder = pickle.load(pickle_file)
logger.info(f"label_encoder.classes_={label_encoder.classes_}")
logger.info("The label encoder has {} classes.".format(len(label_encoder.classes_)))
# Load image list
all_images_df = | pd.read_csv(args.all_imgs_csv) | pandas.read_csv |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
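# Broader time-series regression tests: slicing, pad/backfill limits, datetime64
# column handling and epoch-unit conversion.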
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
        s = concat([Series([ epoch + t for t in range(20) ]).astype(float),
                    Series([np.nan])], ignore_index=True)
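        # Assumed continuation (truncated in the source); it mirrors the integer + iNaT case above.
        result = to_datetime(s, unit='s')
        expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
        assert_series_equal(result, expected)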
import pandas as pd
import numpy as np
import pdfplumber
import json
import os
import re
import datetime
from utils import bdday_to_date, district_correction, \
district_th_to_en, find_similar_word
from get_pdf import ensure_pdf
THAIMONTH_TO_MONTH = {
"ม.ค.": "01",
"ก.พ.": "02",
"มี.ค.": "03",
"เม.ย.": "04",
"พ.ค.": "05",
"มิ.ย.": "06",
"ก.ค.": "07",
"ส.ค.": "08",
"ก.ย.": "09",
"ต.ค.": "10",
"พ.ย.": "11",
"ธ.ค.": "12",
}
PROVINCE_TH_TO_EN = json.load(open("../province_details/province-th-to-en.json", encoding="utf-8"))
PROVINCE_TO_DISTRICT = json.load(open("../province_details/th-province-district.json", encoding="utf-8"))
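# Scan the PDF for pages whose first line marks the provincial outbreak-cluster table.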
def find_cluster_page(pdf_path: str) -> set:
pages = set()
with pdfplumber.open(pdf_path) as pdf:
for page_no in range(len(pdf.pages)):
page_text = pdf.pages[page_no].extract_text(x_tolerance=3, y_tolerance=3)
            if page_text is None:
continue
page_text = page_text.split("\n")
if "การระบาดที่พบในจังหวัดที่มีรายงานผู้ป่วยเพิ่มขึ้น" in page_text[0]:
pages.add(page_no)
return pages
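# Parse the cluster tables on the detected pages into one tidy DataFrame
# (province, district, place of outbreak, start date, new cases, total).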
def extract_cluster(pdf_path: str, pages: set) -> pd.DataFrame:
df = pd.DataFrame()
with pdfplumber.open(pdf_path) as pdf:
for i in pages:
pdf_page = pdf.pages[i]
table = pdf_page.extract_table(
table_settings={"vertical_strategy": "lines", "horizontal_strategy": "lines"})[1:]
df = df.append(table).reset_index(drop=True)
df = df.rename(columns={
0: "province_th",
1: "district_th",
2: "place_of_outbreak",
3: "start_date",
4: "new_cases",
5: "total"
})
df = df.replace(r'^\s*$', np.nan, regex=True)
df["province_th"] = df["province_th"].ffill()
df["total"] = df["total"].ffill()
df = df.dropna().reset_index(drop=True)
df["total"] = df["total"].str.split(expand=True)[0]
df_startdate = df["start_date"].str.split(expand=True)
df_startdate[1] = df_startdate[1].replace(THAIMONTH_TO_MONTH)
df_startdate[2] = "2021"
df["start_date"] = df_startdate[2] + "-" + df_startdate[1] + "-" + df_startdate[0].str.zfill(2)
# Replace newline in cell with space
regex_newline = re.compile(r"\n")
df.replace(regex_newline, " ", inplace=True)
# Correction for "ำ" cannot be read properly from PDF
regex_aum = re.compile(r" า")
df["place_of_outbreak"].replace(regex_aum, "ำ", inplace=True)
df["district_th"].replace(regex_aum, "ำ", inplace=True)
# Province name correction (Will cover "ำ" case as well)
province_th = PROVINCE_TH_TO_EN.keys()
df_invalid_province = df[(~df["province_th"].isin(province_th))]
if len(df_invalid_province) > 0:
# Replace by finding most similar province
df_invalid_correted = df_invalid_province["province_th"].apply(lambda pro: find_similar_word(pro, province_th))
df.update(df_invalid_correted)
print("Uncorrectable Province:")
print(df[(~df["province_th"].isin(province_th))])
df["province_en"] = df["province_th"].map(PROVINCE_TH_TO_EN)
df["province_en"] = df["province_en"].where( | pd.notnull(df["province_en"]) | pandas.notnull |
from datetime import datetime, timezone
import numpy as np
import pandas as pd
from suncalc import get_position, get_times
date = datetime(2013, 3, 5, tzinfo=timezone.utc)
lat = 50.5
lng = 30.5
height = 2000
testTimes = {
'solar_noon': '2013-03-05T10:10:57Z',
'nadir': '2013-03-04T22:10:57Z',
'sunrise': '2013-03-05T04:34:56Z',
'sunset': '2013-03-05T15:46:57Z',
'sunrise_end': '2013-03-05T04:38:19Z',
'sunset_start': '2013-03-05T15:43:34Z',
'dawn': '2013-03-05T04:02:17Z',
'dusk': '2013-03-05T16:19:36Z',
'nautical_dawn': '2013-03-05T03:24:31Z',
'nautical_dusk': '2013-03-05T16:57:22Z',
'night_end': '2013-03-05T02:46:17Z',
'night': '2013-03-05T17:35:36Z',
'golden_hour_end': '2013-03-05T05:19:01Z',
'golden_hour': '2013-03-05T15:02:52Z'}
heightTestTimes = {
'solar_noon': '2013-03-05T10:10:57Z',
'nadir': '2013-03-04T22:10:57Z',
'sunrise': '2013-03-05T04:25:07Z',
'sunset': '2013-03-05T15:56:46Z'}
def test_get_position():
"""getPosition returns azimuth and altitude for the given time and location
"""
pos = get_position(date, lng, lat)
assert np.isclose(pos['azimuth'], -2.5003175907168385)
assert np.isclose(pos['altitude'], -0.7000406838781611)
def test_get_times():
"""getTimes returns sun phases for the given date and location
"""
times = get_times(date, lng, lat)
for key, value in testTimes.items():
assert times[key].strftime("%Y-%m-%dT%H:%M:%SZ") == value
def test_get_times_height():
"""getTimes adjusts sun phases when additionally given the observer height
"""
times = get_times(date, lng, lat, height)
for key, value in heightTestTimes.items():
assert times[key].strftime("%Y-%m-%dT%H:%M:%SZ") == value
def test_get_position_pandas_single_timestamp():
    ts_date = pd.Timestamp(date)
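    # Assumed continuation (the original test body is truncated here): a pandas
    # Timestamp should give the same position as the plain datetime used above.
    pos = get_position(ts_date, lng, lat)
    assert np.isclose(pos['azimuth'], -2.5003175907168385)
    assert np.isclose(pos['altitude'], -0.7000406838781611)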
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import numpy as np
from biom.table import Table
import pandas as pd
import pandas.testing as pdt
from q2_feature_table import merge, merge_seqs, merge_taxa
from q2_feature_table._merge import _merge_feature_data, _get_overlapping
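# Unit tests for q2-feature-table merging: table merging under the different overlap
# methods, the overlap helper, and feature-data merging.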
class MergeTableTests(unittest.TestCase):
def test_single_table(self):
t = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t])
self.assertEqual(t, obs)
def test_valid_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2])
exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2])
exp = Table(np.array([[0, 1, 3, 0, 0, 0], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 0, 2, 6], [0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_invalid_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'features are present'):
merge([t1, t2], 'error_on_overlapping_feature')
def test_valid_overlapping_sample_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S1', 'S5', 'S6'])
obs = merge([t1, t2], 'error_on_overlapping_feature')
exp = Table(np.array([[0, 1, 3, 0, 0], [1, 1, 2, 0, 0],
[0, 0, 0, 2, 6], [2, 0, 0, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_invalid_overlapping_sample_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S1', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'samples.*S1'):
merge([t1, t2])
def test_invalid_overlap_method(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S1', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'overlap method'):
merge([t1, t2], 'peanut')
def test_sum_full_overlap(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 3, 9], [3, 3, 6]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_sum_triple_overlap(self):
t1 = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1] * 3, 'sum')
exp = Table(np.array([[3, 3, 3], [3, 3, 3]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_sum_some_overlap(self):
        # Tables overlap in only some samples/features; the shared cells should be summed.
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S2', 'S5'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 3, 3, 0, 6], [1, 1, 2, 0, 0],
[0, 2, 0, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5'])
self.assertEqual(obs, exp)
def test_sum_overlapping_sample_ids(self):
# This should produce the same result as `error_on_overlapping_feature`
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S1', 'S5', 'S6'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 1, 3, 0, 0], [1, 1, 2, 0, 0],
[0, 0, 0, 2, 6], [2, 0, 0, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_sum_overlapping_feature_ids(self):
# This should produce the same result as `error_on_overlapping_sample`
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_average(self):
t1 = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1] * 3, 'average')
exp = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_average_relative_frequency(self):
t1 = Table(np.array([[0.75, 0.75, 0.75], [0.75, 0.75, 0.75]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1, t2], 'average')
exp = Table(np.array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
class UtilTests(unittest.TestCase):
def test_get_overlapping(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'], ['S1', 'S5', 'S6'])
# samples
obs = _get_overlapping([t1, t2], 'sample')
self.assertEqual(set(['S1']), obs)
# features
obs = _get_overlapping([t1, t2], 'observation')
self.assertEqual(set(['O1']), obs)
def test_get_overlapping_no_overlap(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'], ['S4', 'S5', 'S6'])
# samples
obs = _get_overlapping([t1, t2], 'sample')
self.assertEqual(set(), obs)
# features
obs = _get_overlapping([t1, t2], 'observation')
self.assertEqual(set(), obs)
def test_get_overlapping_multiple(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'], ['S1', 'S5', 'S6'])
t3 = Table(np.array([[3, 3, 1], [0, 2, 1]]),
['O1', 'O2'], ['S1', 'S3', 'S6'])
# samples
obs = _get_overlapping([t1, t2, t3], 'sample')
self.assertEqual({'S1', 'S3', 'S6'}, obs)
# features
obs = _get_overlapping([t1, t2, t3], 'observation')
self.assertEqual({'O1', 'O2'}, obs)
class MergeFeatureDataTests(unittest.TestCase):
def test_merge_single(self):
d = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
obs = _merge_feature_data([d])
pdt.assert_series_equal(obs, d)
def test_valid_overlapping_feature_ids(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGT', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data([d1, d2])
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_first_feature_data_retained(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGAAA', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data([d1, d2])
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
# swapping input order changes f1 data
obs = _merge_feature_data([d2, d1])
exp = pd.Series(['ACGAAA', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_multiple_overlapping_feature_ids_order_maintained(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGAAA', 'ACCA'], index=['f1', 'f3'])
d3 = pd.Series(['AGGA', 'ATAT'], index=['f3', 'f4'])
obs = _merge_feature_data([d1, d2, d3])
exp = pd.Series(['ACGT', 'ACCT', 'ACCA', 'ATAT'],
index=['f1', 'f2', 'f3', 'f4'])
pdt.assert_series_equal(obs, exp)
# swapping input order changes f1 and f3
obs = _merge_feature_data([d3, d2, d1])
exp = pd.Series(['ACGAAA', 'ACCT', 'AGGA', 'ATAT'],
index=['f1', 'f2', 'f3', 'f4'])
pdt.assert_series_equal(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
        d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
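        # Assumed continuation (the original test body is truncated here): series with
        # disjoint feature ids should simply be concatenated; d2's values are illustrative.
        d2 = pd.Series(['ACGG', 'ACCA'], index=['f3', 'f4'])
        obs = _merge_feature_data([d1, d2])
        exp = pd.Series(['ACGT', 'ACCT', 'ACGG', 'ACCA'],
                        index=['f1', 'f2', 'f3', 'f4'])
        pdt.assert_series_equal(obs, exp)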
from collections import OrderedDict
import contextlib
from datetime import datetime, time
from functools import partial
import os
from urllib.error import URLError
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
message="time.clock has been deprecated",
category=DeprecationWarning,
)
yield
read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
return True
def _transfer_marks(engine, read_ext):
"""
    engine gives us a pytest.param object with some marks, read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
return new_param
@pytest.fixture(
autouse=True,
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
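# Excel reader tests; the cd_and_set_engine fixture changes into the data directory and
# monkeypatches pd.read_excel with the parameterised engine for every test.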
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_usecols_int(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3)
# usecols as int
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3
)
def test_usecols_list(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3]
)
        # TODO add index to xls file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A:D"
)
        # TODO add index to xls; reading xls ignores the index name?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D"
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D"
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"]
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"])
expected = DataFrame(
columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, read_ext, index_col):
# see gh-18792
result = pd.read_excel("test1" + read_ext, "Sheet4", index_col=index_col)
expected = DataFrame(
[["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
)
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, read_ext):
msg = (
"Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]"
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E"])
def test_usecols_wrong_type(self, read_ext):
msg = (
"'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable."
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, read_ext):
parsed = pd.read_excel("test2" + read_ext, "Sheet1")
expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, read_ext):
parsed = pd.read_excel("test3" + read_ext, "Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self, read_ext, df_ref):
df1 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1], index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
df3 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, read_ext):
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
(
"DateCol",
[
datetime(2013, 10, 30),
datetime(2013, 10, 31),
datetime(1905, 1, 1),
datetime(2013, 12, 14),
datetime(2015, 3, 14),
],
),
]
)
)
basename = "test_types"
# should read in correctly and infer types
actual = pd.read_excel(basename + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = pd.read_excel(basename + read_ext, "Sheet1", convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = pd.read_excel(basename + read_ext, "Sheet1", index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext, "Sheet1", converters={"StrCol": str}
)
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext,
"Sheet1",
convert_float=False,
converters={"StrCol": str},
)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, read_ext):
basename = "test_converters"
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ["Found", "Found", "Found", "Not found", "Found"]),
("StrCol", ["1", np.nan, "3", "4", "5"]),
]
)
)
converters = {
"IntCol": lambda x: int(x) if x != "" else -1000,
"FloatCol": lambda x: 10 * x if x else np.nan,
2: lambda x: "Found" if x != "" else "Not found",
3: lambda x: str(x) if x else "",
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = pd.read_excel(basename + read_ext, "Sheet1", converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, read_ext):
# GH 8212
basename = "testdtype"
actual = pd.read_excel(basename + read_ext)
expected = DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
).reindex(columns=["a", "b", "c", "d"])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
)
expected["a"] = expected["a"].astype("float64")
expected["b"] = expected["b"].astype("float32")
expected["c"] = ["001", "002", "003", "004"]
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
pd.read_excel(basename + read_ext, dtype={"d": "int64"})
@pytest.mark.parametrize(
"dtype,expected",
[
(
None,
DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
),
),
(
{"a": "float64", "b": "float32", "c": str, "d": str},
DataFrame(
{
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"],
}
),
),
],
)
def test_reader_dtype_str(self, read_ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = "test_multisheet"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ["Charlie", "Alpha", "Beta"]
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, read_ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = "test_multisheet"
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, "Charlie", "Charlie"]
dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = "blank_with_header"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, read_ext):
actual = pd.read_excel("blank" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
actual = pd.read_excel("blank_with_header" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_date_conversion_overflow(self, read_ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame(
[
[pd.Timestamp("2016-03-12"), "<NAME>"],
[pd.Timestamp("2016-03-16"), "<NAME>"],
[1e20, "<NAME>"],
],
columns=["DateColWithBigInt", "StringCol"],
)
if pd.read_excel.keywords["engine"] == "openpyxl":
pytest.xfail("Maybe not supported by openpyxl")
result = pd.read_excel("testdateoverflow" + read_ext)
tm.assert_frame_equal(result, expected)
def test_sheet_name(self, read_ext, df_ref):
filename = "test1"
sheet_name = "Sheet1"
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
) # doc
with ignore_xlrd_time_clock_warning():
df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_excel_read_buffer(self, read_ext):
pth = "test1" + read_ext
expected = pd.read_excel(pth, "Sheet1", index_col=0)
with open(pth, "rb") as f:
actual = pd.read_excel(f, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_bad_engine_raises(self, read_ext):
bad_engine = "foo"
with pytest.raises(ValueError, match="Unknown engine: foo"):
pd.read_excel("", engine=bad_engine)
@tm.network
def test_read_from_http_url(self, read_ext):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/excel/test1" + read_ext
)
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_not_us_locale
def test_read_from_s3_url(self, read_ext, s3_resource):
# Bucket "pandas-test" created in tests/io/conftest.py
with open("test1" + read_ext, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test1" + read_ext, Body=f)
url = "s3://pandas-test/test1" + read_ext
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
# ignore warning from old xlrd
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
def test_read_from_file_url(self, read_ext, datapath):
# FILE
localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext)
local_table = pd.read_excel(localtable)
try:
url_table = pd.read_excel("file://localhost/" + localtable)
except URLError:
# fails on some systems
import platform
pytest.skip("failing on {}".format(" ".join(platform.uname()).strip()))
tm.assert_frame_equal(url_table, local_table)
def test_read_from_pathlib_path(self, read_ext):
# GH12655
from pathlib import Path
str_path = "test1" + read_ext
expected = pd.read_excel(str_path, "Sheet1", index_col=0)
path_obj = Path("test1" + read_ext)
actual = pd.read_excel(path_obj, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
@td.check_file_leaks
def test_read_from_py_localpath(self, read_ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join("test1" + read_ext)
expected = pd.read_excel(str_path, "Sheet1", index_col=0)
path_obj = LocalPath().join("test1" + read_ext)
actual = pd.read_excel(path_obj, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_seconds(self, read_ext):
# Test reading times with and without milliseconds. GH5945.
expected = DataFrame.from_dict(
{
"Time": [
time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54),
]
}
)
actual = pd.read_excel("times_1900" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel("times_1904" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, read_ext):
# see gh-4679
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = "testmultiindex" + read_ext
# "mi_column" sheet
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=mi,
)
actual = pd.read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# "mi_index" sheet
expected.index = mi
expected.columns = ["a", "b", "c", "d"]
actual = pd.read_excel(mi_file, "mi_index", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "both" sheet
expected.columns = mi
actual = pd.read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "mi_index_name" sheet
expected.columns = ["a", "b", "c", "d"]
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, "mi_index_name", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# "mi_column_name" sheet
expected.index = list(range(4))
expected.columns = mi.set_names(["c1", "c2"])
actual = pd.read_excel(mi_file, "mi_column_name", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# see gh-11317
# "name_with_int" sheet
expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"])
actual = pd.read_excel(mi_file, "name_with_int", index_col=0, header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_name" sheet
expected.columns = mi.set_names(["c1", "c2"])
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, "both_name", index_col=[0, 1], header=[0, 1])
        tm.assert_frame_equal(actual, expected)
import numpy as np
import pandas as pd
from . import util as DataUtil
from . import cols as DataCol
"""
The main data loader.
TODO: population & common special dates
"""
class DataCenter:
def __init__(self):
self.__kabko = None
self.__dates_global = pd.DataFrame([], columns=DataCol.DATES_GLOBAL)
self.__dates_local = pd.DataFrame([], columns=DataCol.DATES_LOCAL)
self.__date_names_global = np.array([])
self.__date_names_local = np.array([])
self.__population_global = None
self.__covid_local = None
self.raw_global = None
self.data_global = None
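    # Load the raw per-kabko covid counts, renaming columns to the standard labels and
    # casting the case columns to the default dtype.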
def load_covid_local(
self,
df,
kabko_col="kabko",
date_col="date",
rename_cols={
"infected": DataCol.I,
"infectious": DataCol.I,
"recovered": DataCol.R,
"dead": DataCol.D
},
drop_cols=["infected_total"],
drop_first_col=False,
exclude_kabkos=[
"AWAK BUAH KAPAL",
"RS LAPANGAN INDRAPURA"
]
):
df = df.copy()
labels = [DataCol.I, DataCol.R, DataCol.D]
df.loc[:, date_col] = pd.to_datetime(df[date_col])
drop_cols = [df.columns[0], *drop_cols] if drop_first_col else drop_cols
df.drop(columns=drop_cols, axis=1, inplace=True)
df.drop(df.index[df[kabko_col].isin(exclude_kabkos)], inplace=True)
rename_cols = {
kabko_col: DataCol.KABKO,
date_col: DataCol.DATE,
**rename_cols
}
df.rename(columns=rename_cols, inplace=True)
df.loc[:, labels] = df[labels].astype(DataUtil.DEFAULT_DTYPE)
self.__covid_local = df
self.__kabko = df[kabko_col].unique()
return self.__covid_local
@property
def kabko(self):
if self.__kabko is None:
if self.__covid_local is None:
raise Exception("Please set/load covid data first")
self.load_covid_local(self.__covid_local)
return self.__kabko
@property
def covid_local(self):
return self.__covid_local
def load_vaccine(
self,
df,
date_col="date",
labels_orig=[
"people_vaccinated",
"people_fully_vaccinated"
]
):
if labels_orig:
df = df[[date_col, *labels_orig]]
df = df.copy()
        df.loc[:, date_col] = pd.to_datetime(df[date_col])
import contextlib
import logging
import os
import psutil
import re
import shutil
from packaging import version
import pandas as pd
import h2o
from h2o.automl import H2OAutoML
from frameworks.shared.callee import FrameworkError, call_run, output_subdir, result
from frameworks.shared.utils import Monitoring, Namespace as ns, Timer, clean_dir, touch, write_csv, zip_path  # write_csv is used below; assumed to live in the shared utils
log = logging.getLogger(__name__)
class BackendMemoryMonitoring(Monitoring):
def __init__(self, name=None, interval_seconds=300, check_on_exit=False,
verbosity=0, log_level=logging.INFO):
super().__init__(name, interval_seconds, check_on_exit, "backend_monitoring_")
self._verbosity = verbosity
self._log_level = log_level
def _check_state(self):
sd = h2o.cluster().get_status_details()
log.log(self._log_level, "System memory (bytes): %s", psutil.virtual_memory())
log.log(self._log_level, "DKV: %s MB; Other: %s MB", sd['mem_value_size'][0] >> 20, sd['pojo_mem'][0] >> 20)
def run(dataset, config):
log.info(f"\n**** H2O AutoML [v{h2o.__version__}] ****\n")
# Mapping of benchmark metrics to H2O metrics
metrics_mapping = dict(
acc='mean_per_class_error',
auc='AUC',
logloss='logloss',
mae='mae',
mse='mse',
r2='r2',
rmse='rmse',
rmsle='rmsle'
)
sort_metric = metrics_mapping[config.metric] if config.metric in metrics_mapping else None
if sort_metric is None:
# TODO: Figure out if we are going to blindly pass metrics through, or if we use a strict mapping
log.warning("Performance metric %s not supported, defaulting to AUTO.", config.metric)
try:
training_params = {k: v for k, v in config.framework_params.items() if not k.startswith('_')}
nthreads = config.framework_params.get('_nthreads', config.cores)
jvm_memory = str(round(config.max_mem_size_mb * 2/3))+"M" # leaving 1/3rd of available memory for XGBoost
log.info("Starting H2O cluster with %s cores, %s memory.", nthreads, jvm_memory)
max_port_range = 49151
min_port_range = 1024
rnd_port = os.getpid() % (max_port_range-min_port_range) + min_port_range
port = config.framework_params.get('_port', rnd_port)
init_params = config.framework_params.get('_init', {})
if "logs" in config.framework_params.get('_save_artifacts', []):
init_params['ice_root'] = output_subdir("logs", config)
h2o.init(nthreads=nthreads,
port=port,
min_mem_size=jvm_memory,
max_mem_size=jvm_memory,
**init_params)
import_kwargs = dict(escapechar='\\')
# Load train as an H2O Frame, but test as a Pandas DataFrame
log.debug("Loading train data from %s.", dataset.train.path)
train = None
if version.parse(h2o.__version__) >= version.parse("3.32.1"): # previous versions may fail to parse correctly some rare arff files using single quotes as enum/string delimiters (pandas also fails on same datasets)
import_kwargs['quotechar'] = '"'
train = h2o.import_file(dataset.train.path, destination_frame=frame_name('train', config), **import_kwargs)
if train.nlevels() != dataset.domains.cardinalities:
h2o.remove(train)
train = None
import_kwargs['quotechar'] = "'"
if not train:
train = h2o.import_file(dataset.train.path, destination_frame=frame_name('train', config), **import_kwargs)
# train.impute(method='mean')
log.debug("Loading test data from %s.", dataset.test.path)
test = h2o.import_file(dataset.test.path, destination_frame=frame_name('test', config), **import_kwargs)
# test.impute(method='mean')
log.info("Running model on task %s, fold %s.", config.name, config.fold)
log.debug("Running H2O AutoML with a maximum time of %ss on %s core(s), optimizing %s.",
config.max_runtime_seconds, config.cores, sort_metric)
aml = H2OAutoML(max_runtime_secs=config.max_runtime_seconds,
sort_metric=sort_metric,
seed=config.seed,
**training_params)
monitor = (BackendMemoryMonitoring(interval_seconds=config.ext.monitoring.interval_seconds,
check_on_exit=True,
verbosity=config.ext.monitoring.verbosity)
if config.framework_params.get('_monitor_backend', False)
# else contextlib.nullcontext # Py 3.7+ only
else contextlib.contextmanager(lambda: (_ for _ in (0,)))()
)
with Timer() as training:
with monitor:
aml.train(y=dataset.target.index, training_frame=train)
if not aml.leader:
raise FrameworkError("H2O could not produce any model in the requested time.")
with Timer() as predict:
preds = aml.predict(test)
preds = extract_preds(preds, test, dataset=dataset)
save_artifacts(aml, dataset=dataset, config=config)
return result(
output_file=config.output_predictions_file,
predictions=preds.predictions,
truth=preds.truth,
probabilities=preds.probabilities,
probabilities_labels=preds.probabilities_labels,
models_count=len(aml.leaderboard),
training_duration=training.duration,
predict_duration=predict.duration
)
finally:
con = h2o.connection()
if con:
# h2o.remove_all()
con.close()
if con.local_server:
con.local_server.shutdown()
# if h2o.cluster():
# h2o.cluster().shutdown()
def frame_name(fr_type, config):
return '_'.join([fr_type, config.name, str(config.fold)])
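# Save optional artifacts (leaderboard CSV, models as json/binary/mojo, per-model
# predictions, H2O logs) according to the `_save_artifacts` framework parameter.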
def save_artifacts(automl, dataset, config):
artifacts = config.framework_params.get('_save_artifacts', ['leaderboard'])
try:
models_artifacts = []
lb = automl.leaderboard.as_data_frame()
log.debug("Leaderboard:\n%s", lb.to_string())
if 'leaderboard' in artifacts:
models_dir = output_subdir("models", config)
lb_path = os.path.join(models_dir, "leaderboard.csv")
write_csv(lb, lb_path)
models_artifacts.append(lb_path)
models_pat = re.compile(r"models(\[(json|binary|mojo)(?:,(\d+))?\])?")
models = list(filter(models_pat.fullmatch, artifacts))
for m in models:
models_dir = output_subdir("models", config)
all_models_se = next((mid for mid in lb['model_id'] if mid.startswith("StackedEnsemble_AllModels")),
None)
match = models_pat.fullmatch(m)
mformat = match.group(2) or 'json'
topN = int(match.group(3) or -1)
if topN < 0 and mformat != 'json' and all_models_se:
models_artifacts.append(save_model(all_models_se, dest_dir=models_dir, mformat=mformat))
else:
count = 0
for mid in lb['model_id']:
if topN < 0 or count < topN:
save_model(mid, dest_dir=models_dir, mformat=mformat)
count += 1
else:
break
models_archive = os.path.join(models_dir, f"models_{mformat}.zip")
zip_path(models_dir, models_archive, filtr=lambda p: p not in models_artifacts)
models_artifacts.append(models_archive)
clean_dir(models_dir,
filtr=lambda p: p not in models_artifacts
and os.path.splitext(p)[1] in ['.json', '.zip', ''])
if 'model_predictions' in artifacts:
predictions_dir = output_subdir("predictions", config)
test = h2o.get_frame(frame_name('test', config))
for mid in lb['model_id']:
model = h2o.get_model(mid)
h2o_preds = model.predict(test)
preds = extract_preds(h2o_preds, test, dataset=dataset)
if preds.probabilities_labels is None:
preds.probabilities_labels = preds.h2o_labels
write_preds(preds, os.path.join(predictions_dir, mid, 'predictions.csv'))
predictions_zip = os.path.join(predictions_dir, "model_predictions.zip")
zip_path(predictions_dir, predictions_zip)
clean_dir(predictions_dir, filtr=lambda p: os.path.isdir(p))
if 'logs' in artifacts:
logs_dir = output_subdir("logs", config)
logs_zip = os.path.join(logs_dir, "h2o_logs.zip")
zip_path(logs_dir, logs_zip)
# h2o.download_all_logs(dirname=logs_dir)
clean_dir(logs_dir, filtr=lambda p: p != logs_zip)
except Exception:
log.debug("Error when saving artifacts.", exc_info=True)
def save_model(model_id, dest_dir='.', mformat='json'):
model = h2o.get_model(model_id)
if mformat == 'mojo':
return model.save_mojo(path=dest_dir)
# model.download_mojo(path=dest_dir, get_genmodel_jar=True)
elif mformat == 'binary':
return h2o.save_model(model, path=dest_dir)
# return h2o.download_model(model, path=dest_dir)
else:
return model.save_model_details(path=dest_dir)
def extract_preds(h2o_preds, test, dataset):
h2o_preds = h2o_preds.as_data_frame(use_pandas=False)
preds = to_data_frame(h2o_preds[1:], columns=h2o_preds[0])
y_pred = preds.iloc[:, 0]
h2o_truth = test[:, dataset.target.index].as_data_frame(use_pandas=False, header=False)
y_truth = to_data_frame(h2o_truth)
predictions = y_pred.values
probabilities = preds.iloc[:, 1:].values
prob_labels = h2o_labels = h2o_preds[0][1:]
if all([re.fullmatch(r"p(-?\d)+", p) for p in prob_labels]):
# for categories represented as numerical values, h2o prefixes the probabilities columns with p
# in this case, we let the app set the labels to avoid a mismatch
prob_labels = None
truth = y_truth.values
return ns(predictions=predictions,
truth=truth,
probabilities=probabilities,
probabilities_labels=prob_labels,
h2o_labels=h2o_labels)
def write_preds(preds, path):
df = to_data_frame(preds.probabilities, columns=preds.probabilities_labels)
df = df.assign(predictions=preds.predictions)
df = df.assign(truth=preds.truth)
write_csv(df, path)
def to_data_frame(arr, columns=None):
return | pd.DataFrame.from_records(arr, columns=columns) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 27 17:50:08 2021
@author: Dropex
"""
import TAClass
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def smas_graph(symbol,interval,exchange):
asset = TAClass.TradeAsset(symbol,interval,exchange)
asset.getklines()
asset.dataframe()
asset.addsma(asset.df, 10)
asset.addsma(asset.df, 20)
asset.addsma(asset.df, 50)
asset.addsma(asset.df, 100)
asset.addsma(asset.df, 200)
opentime_df= | pd.to_datetime(asset.df['OpenTime'], unit='ms') | pandas.to_datetime |
import pandas as pd
import numpy as np
from scipy.stats.mstats import theilslopes
# Custom exception
class NoValidIntervalError(Exception):
'''raised when no valid rows appear in the result frame'''
pass
class pm_frame(pd.DataFrame):
'''DataFrame subclass for analysis, constructed from system data; usually created with create_pm_frame'''
def calc_result_frame(self, trim=True):
'''Return a result_frame
Returns a result_frame which contains the characteristics of each soiling interval.
An updated version of the pm_frame is stored as self.pm_frame.
Parameters
----------
trim (boolean): whether to trim (remove) the first and last soiling intervals to avoid including partial intervals
'''
# Estimate slope of each soiling interval, store results in a dataframe
result_list = []
if trim:
res_loop = sorted(list(set(self['run'])))[1:-1] # ignore first and last interval
else:
res_loop = sorted(list(set(self['run'])))
for r in res_loop:
run = self[self.run == r]
length = (run.day[-1] - run.day[0])
start_day = run.day[0]
end_day = run.day[-1]
run = run[run.pi_norm > 0]
if len(run) > 2 and run.pi_norm.sum() > 0:
fit = theilslopes(run.pi_norm, run.day)
fit_poly = np.poly1d(fit[0:2])
result_list.append({
'start': run.index[0],
'end': run.index[-1],
'length': length,
'run': r,
'run_slope': fit[0],
'run_slope_low': fit[2],
'run_slope_high': min([0.0, fit[3]]),
'max_neg_step': min(run.delta),
'start_loss': 1,
'clean_wo_precip': run.clean_wo_precip[0],
'inferred_start_loss': fit_poly(start_day),
'inferred_end_loss': fit_poly(end_day),
'valid': True
})
else:
run = self[self.run == r]
result_list.append({
'start': run.index[0],
'end': run.index[-1],
'length': length,
'run': r,
'run_slope': 0,
'run_slope_low': 0,
'run_slope_high': 0,
'max_neg_step': min(run.delta),
'start_loss': 1,
'clean_wo_precip': run.clean_wo_precip[0],
'inferred_start_loss': run.pi_norm.mean(),
'inferred_end_loss': run.pi_norm.mean(),
'valid': False
})
results = | pd.DataFrame(result_list) | pandas.DataFrame |
import requests
import re
import ipaddress
import pandas as pd
import openpyxl
from tkinter import *
from tkinter import filedialog
import tkinter.messagebox
import os
import configparser
from openpyxl.styles import Border, Side
from openpyxl.formatting.rule import ColorScaleRule, FormulaRule
config = configparser.ConfigParser()
config.read(r"API_KEY.ini")
api_key = config.get('Auth', 'API_KEY')
filepath = None
GUI = Tk()
def checkip(IP):
if ipaddress.ip_address(IP).is_private is False:
abipdbheaders = {
'Key': api_key,
'Accept': 'application/json',
}
abipdbparams = {
'maxAgeInDays': 1,
'ipAddress': IP,
}
req = requests.get("https://api.abuseipdb.com/api/v2/check", headers = abipdbheaders, params = abipdbparams)
resp = req.json()
if 'errors' not in resp:
return resp["data"]
else:
exit()
else:
return (f"{IP} is private")
def filterip(ipin) :
ipregex = re.compile(r'(?:\d{1,3}\.)+(?:\d{1,3})')
ipa = re.search(ipregex, ipin)
return ipa.group(0)
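# Illustrative sketch (not part of the original tool): shows what filterip() extracts.
# The sample log line below is hypothetical; filterip returns the first dotted-quad
# substring it finds, so surrounding text such as ports or timestamps is dropped.
def _filterip_example():
    sample_line = "Blocked connection from 203.0.113.45:8080 at 12:00"
    return filterip(sample_line)  # -> "203.0.113.45"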
def checkipfromfile(infile):
iplist = []
output = []
f1 = open(infile, 'r')
tmp = f1.readlines()
for i in tmp:
if i == '' or i == " " or i == "198.19.250.1" or i == "\n":
pass
else:
iplist.append(filterip(i))
for i in iplist:
output.append(checkip(i))
f1.close()
return output
def checkipfrominput(in1):
iplist2 = []
output2 = []
tmp2 = in1.split('\n')
for i in tmp2:
if i == '' or i == " " or i == "198.19.250.1" or i == "\n":
pass
else:
iplist2.append(filterip(i))
for i in iplist2:
output2.append(checkip(i))
return output2
def get_report(input):
concdict = {
k: [d.get(k) for d in input if k in d]
for k in set().union(*input)
}
IpaddList = concdict.get("ipAddress")
AbusescoreList = concdict.get("abuseConfidenceScore")
PublicList = concdict.get("isPublic")
IpverList = concdict.get("ipVersion")
IswlList = concdict.get("isWhitelisted")
CountrycList = concdict.get("countryCode")
UsageList = concdict.get("usageType")
IspList = concdict.get("isp")
DomainList = concdict.get("domain")
TotalreportsList = concdict.get("totalReports")
LastreportList = concdict.get("lastReportedAt")
wb = openpyxl.Workbook()
ws = wb.active
ws['A1'] = 'ipAddress'
ws['B1'] = 'abuseConfidenceScore'
ws['C1'] = 'isPublic'
ws['D1'] = 'ipVersion'
ws['E1'] = 'isWhitelisted'
ws['F1'] = 'countryCode'
ws['G1'] = 'usageType'
ws['H1'] = 'isp'
ws['I1'] = 'domain'
ws['J1'] = 'totalReports'
ws['K1'] = 'lastReportedAt'
border_style = Border(left=Side(style='thin'),
right=Side(style='thin'),
top=Side(style='thin'),
bottom=Side(style='thin'))
clrrule = ColorScaleRule(start_type= 'num', start_value='0',start_color='00B050', mid_type= 'num', mid_value='25', mid_color='FCA904', end_type='num', end_value='100', end_color='CC0000')
ws.conditional_formatting.add('B2:B500', clrrule)
ws.conditional_formatting.add('A1:K500', FormulaRule(formula=['NOT(ISBLANK(A1))'], stopIfTrue=False, border=border_style))
dataframeIpaddList = | pd.DataFrame({'ipAddress': IpaddList}) | pandas.DataFrame |
import webbrowser
import pandas as pd
import sqlite3
import matplotlib.pyplot as plt
import folium
def top_ties(data, num, sort_by='summ'):
"""
Handle ties when taking the top rows of a sorted dataframe.
:param sort_by: the name of the column the dataframe was sorted by
:param data: pandas DataFrame containing the sorted values
:param num: the number of rows requested
:return: a pandas DataFrame containing the top num rows, extended to include ties
"""
while True:
if num == len(data):
return data
# compare the nth value with the (n+1)th value; if they are equal, it is a tie
if data.iloc[num - 1][sort_by] != data.iloc[num][sort_by]:
break
else:
num += 1
return data.iloc[:num]
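# Illustrative sketch (not part of the original assignment code): demonstrates the
# tie handling of top_ties on a hypothetical, already-sorted frame. Because rows 1
# and 2 tie on 'summ', asking for the top 2 returns 3 rows.
def _top_ties_example():
    toy = pd.DataFrame({'summ': [10, 8, 8, 5]})
    return top_ties(toy, 2)  # keeps the rows with summ 10, 8, 8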
def task1():
"""
Task 1 implements a function that lets users enter a range of years
and a crime type.
The program generates a bar plot showing the month-wise total count of
the chosen crime type.
"""
start_year = int(input("Enter start year (YYYY):"))
end_year = int(input("Enter end year (YYYY):"))
crime_type = input("Enter crime type:")
crime_type = "\'" + crime_type + "\'"
command = "select crime_incidents.Month, sum(crime_incidents.Incidents_Count) as count " \
"from crime_incidents " \
"where crime_incidents.Crime_Type = {} and crime_incidents.Year >= {} and crime_incidents.Year <= {} " \
"group by crime_incidents.Month;".format(crime_type, start_year, end_year)
data = pd.read_sql_query(command, conn)
# insert months with count = 0 into the data frame at the correct position (from 1 to 12)
for i in range(1, 13):
if not any(data.Month == i):
insert_row = pd.DataFrame([[i, 0]], columns=['Month', 'count'])
# split the data frame at the position where we want to insert a new row
# then concat them together
if i > 1:
above = data.loc[:i - 1]
below = data.loc[i:]
data = pd.concat([above, insert_row, below], ignore_index=True)
elif i == 1:
below = data.loc[0:]
data = pd.concat([insert_row, below], ignore_index=True)
data.plot.bar(x="Month")
plt.plot()
plt.show()
def task2():
"""
Task 2 implements a function that lets users enter an integer n.
The program generates a map showing the n most populous and the n least
populous neighbourhoods, along with their population counts.
"""
m = folium.Map(location=[53.5444, -113.323], zoom_start=11)
num = int(input('Enter number of locations:'))
# the two queries below return neighbourhoods with non-zero population, ordered ascending (command1) and descending (command2)
command1 = 'select Latitude,Longitude,population.Neighbourhood_Name,' \
'(CANADIAN_CITIZEN+NON_CANADIAN_CITIZEN+NO_RESPONSE)as summ ' \
'from population,coordinates ' \
'where summ != 0 and population.Neighbourhood_Name=coordinates.Neighbourhood_Name ' \
'and coordinates.Latitude!=0 and coordinates.Longitude!=0 ' \
'group by population.Neighbourhood_Name ' \
'order by summ;'
command2 = 'select Latitude,Longitude,population.Neighbourhood_Name,' \
'(CANADIAN_CITIZEN+NON_CANADIAN_CITIZEN+NO_RESPONSE)as summ ' \
'from population,coordinates ' \
'where summ != 0 and population.Neighbourhood_Name=coordinates.Neighbourhood_Name ' \
'and coordinates.Latitude!=0 and coordinates.Longitude!=0 ' \
'group by population.Neighbourhood_Name ' \
'order by summ DESC ; '
data1 = pd.read_sql_query(command1, conn)
data2 = pd.read_sql_query(command2, conn)
while num > len(data1):
print('The number you input is larger than the number of actual locations({})! '
'Please input a new value!'.format(len(data1)))
num = int(input("Enter number of locations:"))
data1 = top_ties(data1, num)
data2 = top_ties(data2, num)
data = pd.concat([data1, data2], ignore_index=True)
for i in range(0, len(data)):
row = data.iloc[i]
folium.Circle(location=[row["Latitude"], row["Longitude"]],
popup="{} <br> Population: {}".format(row["Neighbourhood_Name"], row["summ"]),
radius=int(row["summ"]) / 10, color="crimson", fill=True, fill_color="crimson").add_to(m)
m.save('task2.html')
# open html file
webbrowser.open("task2.html")
def task3():
"""
Task 3 implements a function that lets users enter a range of years,
a crime type, and a number of neighbourhoods.
The program generates a map showing the top n neighbourhoods, and their
crime counts, where the given crime type occurred most within the given range.
"""
start_year = int(input("Enter start year (YYYY):"))
end_year = int(input("Enter end year (YYYY):"))
crime_type = input("Enter crime type:")
crime_type = "\'" + crime_type + "\'"
num = int(input("Enter number of neighborhood:"))
m = folium.Map(location=[53.5444, -113.323], zoom_start=11)
'''
This is the version without ties handling.
command = 'select crime_incidents.Neighbourhood_Name,sum(crime_incidents.Incidents_Count)' \
'as summ, coordinates.Latitude, coordinates.Longitude ' \
'from crime_incidents, coordinates ' \
'where crime_incidents.Crime_Type = {} and crime_incidents.Year between {} and {} ' \
'and coordinates.Neighbourhood_Name=crime_incidents.Neighbourhood_Name ' \
'group by crime_incidents.Neighbourhood_Name ' \
'order by summ DESC ' \
'limit {}'.format(crime_type, start_year, end_year, num)
'''
command = 'select crime_incidents.Neighbourhood_Name,sum(crime_incidents.Incidents_Count)' \
'as summ, coordinates.Latitude, coordinates.Longitude ' \
'from crime_incidents, coordinates ' \
'where crime_incidents.Crime_Type = {} and crime_incidents.Year between {} and {} ' \
'and coordinates.Neighbourhood_Name=crime_incidents.Neighbourhood_Name ' \
'and coordinates.Latitude!=0 and coordinates.Longitude!=0 ' \
'group by crime_incidents.Neighbourhood_Name ' \
'order by summ DESC ' \
';'.format(crime_type, start_year, end_year)
data = pd.read_sql_query(command, conn)
'''
# check nth value with n+1th value, if they are equal, consider a tie
while True:
if data.iloc[num - 1]['summ'] != data.iloc[num]['summ']:
break
else:
num += 1
data = data.iloc[:num]
'''
# check if user input is too big
while num > len(data):
print('The number you input is larger than the number of actual neighborhoods({})! '
'Please input a new value!'.format(len(data)))
num = int(input("Enter number of neighborhood:"))
data = top_ties(data, num)
# print(data)
for i in range(0, len(data)):
row = data.iloc[i]
folium.Circle(location=[row["Latitude"], row["Longitude"]],
popup="{} <br> Population: {}".format(row["Neighbourhood_Name"], row["summ"]),
radius=int(row["summ"]), color="crimson", fill=True, fill_color="crimson").add_to(m)
m.save('task3.html')
# open html file
webbrowser.open("task3.html")
def task4():
"""
Task 4 implements a function that lets users enter a range of years
and a number of neighbourhoods.
The program generates a map showing the top n neighbourhoods by
crime-to-population ratio within the provided range; the map also shows
the most frequent crime type in each of these neighbourhoods.
"""
start_year = int(input("Enter start year (YYYY):"))
end_year = int(input("Enter end year (YYYY):"))
num = int(input("Enter number of neighborhood:"))
m = folium.Map(location=[53.5444, -113.323], zoom_start=11)
command = 'select population.Neighbourhood_Name,(population.CANADIAN_CITIZEN+' \
'population.NON_CANADIAN_CITIZEN+population.NO_RESPONSE)as summ,' \
'sum(crime_incidents.Incidents_Count) as summ_c,coordinates.Latitude,coordinates.Longitude ' \
'from population, crime_incidents,coordinates ' \
'where summ != 0 and crime_incidents.Year between {} and {} ' \
'and population.Neighbourhood_Name = crime_incidents.Neighbourhood_Name' \
' and population.Neighbourhood_Name=coordinates.Neighbourhood_Name ' \
'and coordinates.Latitude!=0 and coordinates.Longitude!=0 ' \
'group by population.Neighbourhood_Name'.format(start_year, end_year)
data = pd.read_sql_query(command, conn)
while num > len(data):
print('The number you input is larger than the number of actual neighborhoods({})! '
'Please input a new value!'.format(len(data)))
num = int(input("Enter number of neighborhood:"))
data['summ'] = data['summ'].astype(int)
data['summ_c'] = data['summ_c'].astype(int)
data['c_rate'] = data['summ_c'] / data['summ']
# print(data)
# sort the data by c_rate in descending order
data.sort_values(by=['c_rate'], inplace=True, ascending=False)
"""
while True:
if data.iloc[num - 1]['c_rate'] != data.iloc[num]['c_rate']:
break
else:
num += 1
data = data.iloc[:num]
"""
data = top_ties(data, num, sort_by='c_rate')
for i in range(0, len(data)):
row = data.iloc[i]
# crime_type = data2[data2['Neighbourhood_Name'] == row["Neighbourhood_Name"]].Crime_Type.item()
crime_type = get_crime_type(row["Neighbourhood_Name"], start_year, end_year)
name = row["Neighbourhood_Name"]
folium.Circle(location=[row["Latitude"], row["Longitude"]],
popup="{}<br>{}<br>{}".format(name, crime_type, row["c_rate"]),
radius=row["c_rate"] * 1000, color="crimson", fill=True, fill_color="crimson").add_to(m)
m.save('task4.html')
webbrowser.open("task4.html")
def get_crime_type(neighborhood, start_year, end_year):
"""
Return a string indicating the most frequent crime type of a given area,
with ties included (all crime types sharing the maximal count are returned).
:param neighborhood: the name of the area, as a string
:param start_year: start year for the query
:param end_year: end year for the query
:return: a string indicating the crime type
"""
nei_name = "\'" + neighborhood + "\'"
command = 'select t1.Neighbourhood_Name as Neighbourhood_Name,t1.Crime_Type as Crime_Type,t1.summ as summ ' \
'from(select crime_incidents.Neighbourhood_Name,crime_incidents.' \
'Crime_Type,sum( crime_incidents.Incidents_Count)as summ ' \
'from crime_incidents ' \
'where crime_incidents.Year between {} and {} ' \
'group by crime_incidents.Neighbourhood_Name,crime_incidents.Crime_Type)as t1 ' \
'where t1.Neighbourhood_Name={} ' \
'order by t1.summ desc'.format(start_year, end_year, nei_name)
data = | pd.read_sql_query(command, conn) | pandas.read_sql_query |
import numpy as np
import pandas as pd
from app import db
from app.fetcher.fetcher import Fetcher
from app.models import OckovaniLide
class VaccinatedFetcher(Fetcher):
"""
Class for updating vaccinated people table.
"""
VACCINATED_CSV = 'https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/ockovani-profese.csv'
def __init__(self):
super().__init__(OckovaniLide.__tablename__, self.VACCINATED_CSV)
def fetch(self, import_id: int) -> None:
usecols = ['datum', 'vakcina', 'kraj_nuts_kod', 'orp_bydliste_kod', 'zarizeni_kod', 'zarizeni_nazev',
'poradi_davky', 'vekova_skupina']
df = pd.read_csv(self._url, dtype={'zarizeni_kod': 'object'},
usecols=lambda c: c.startswith('indikace_') or c in usecols)
self._log_download_finished()
df['orp_bydl_kod'] = df['orp_bydliste_kod'].replace({np.nan: '-'}).astype(str).str[:4]
df['zarizeni_kod'] = df['zarizeni_kod'].str.zfill(11)
df['indikace_zdravotnik'] = df['indikace_zdravotnik'].fillna(False).astype('bool')
df['indikace_socialni_sluzby'] = df['indikace_socialni_sluzby'].fillna(False).astype('bool')
df['indikace_ostatni'] = df['indikace_ostatni'].fillna(False).astype('bool')
df['indikace_pedagog'] = df['indikace_pedagog'].fillna(False).astype('bool')
df['indikace_skolstvi_ostatni'] = df['indikace_skolstvi_ostatni'].fillna(False).astype('bool')
df['indikace_bezpecnostni_infrastruktura'] = df['indikace_bezpecnostni_infrastruktura'].fillna(False).astype('bool')
df['indikace_chronicke_onemocneni'] = df['indikace_chronicke_onemocneni'].fillna(False).astype('bool')
orp = pd.read_sql_query('select uzis_orp orp_bydl_kod, kraj_nuts kraj_bydl_nuts from obce_orp', db.engine)
df = | pd.merge(df, orp, how='left') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
"下载数据"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
"按时间划分验证集"
#转化测试集时间戳为标准时间
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
# convert train-set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
# process the item_category_list attribute of the train set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
# process the item_category_list attribute of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
# compute the rank of the item category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
# count the categories shared between item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
# count of categories not in common (note: this overwrites the 'item_category_count' column computed just above)
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
del trainSet['predict_category']; del testSet['predict_category']
"划分数据集"
#测试集 23-24号特征提取,25号打标
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
# validation set: features from the 22nd-23rd, labels on the 24th
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
# train set: features 21st-22nd, labels 23rd; features 20th-21st, labels 22nd; features 19th-20th, labels 21st; features 18th-19th, labels 20th
# label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
# feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
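# Illustrative sketch (not part of the original pipeline): the intended call order,
# assuming the two competition .txt files are available in the working directory.
def _split_example():
    trainSet, testSet = loadData()
    # returns (test, testFeat, validate, validateFeat,
    #          train1, trainFeat1, ..., train4, trainFeat4)
    return splitData(trainSet, testSet)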
def modelXgb(train,test):
"xgb模型"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
# based on the Pearson correlation coefficient, drop attributes with correlation below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
# model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
# training
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
# prediction
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
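# Illustrative sketch (not part of the original script): one way to compute the
# competition metric (log loss) for a labelled split, assuming scikit-learn is
# available. `pred` is the frame returned by modelXgb and `labelled` is any frame
# carrying the true 'is_trade' column with matching 'instance_id' values.
def _log_loss_of(pred, labelled):
    from sklearn.metrics import log_loss
    merged = pd.merge(pred, labelled[['instance_id', 'is_trade']], on='instance_id')
    return log_loss(merged['is_trade'].values, merged['predicted_score'].values)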
def get_item_feat(data,dataFeat):
"item的特征提取"
result = pd.DataFrame(dataFeat['item_id'])
result = result.drop_duplicates(['item_id'],keep='first')
"1.统计item出现次数"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"2.统计item历史被购买的次数"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"3.统计item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
result['item_buy_ratio'] = buy_ratio
"4.统计item历史未被够买的次数"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
def get_user_feat(data,dataFeat):
"user的特征提取"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = | pd.merge(result,feat,on=['user_id'],how='left') | pandas.merge |
from __future__ import print_function
import os
import csv
import gidcommon as gc
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import random
import numpy as np
import pandas as pd
os.chdir(gc.datadir)
email = '<EMAIL>'
password = '<PASSWORD>'
driver = webdriver.Firefox()
driver.get("http://www.gideononline.com")
sleep(random.lognormvariate(1.5,0.5))
driver.find_element_by_name("email").send_keys(email)
driver.find_element_by_name("password").send_keys(password, Keys.RETURN)
sleep(random.lognormvariate(2.5,0.5))
# Set up our dict of series
diseases_by_country = {}
driver.get("http://web.gideononline.com/web/epidemiology/")
country_menu = driver.find_element_by_name('country')
countries = map(lambda x:x.text, country_menu.find_elements_by_tag_name('option'))
for country in countries:
print(country)
driver.find_element_by_xpath("//*[contains(text(), '" +
country +
"')]").click()
print("Sleeping...")
sleep(random.lognormvariate(1.5,0.5))
menulayer = driver.find_element_by_id('menuLayer')
diseases = menulayer.find_elements_by_tag_name('a')
diseases = map(lambda x: x.text, diseases)
print(diseases)
print("Making a series for this country")
diseases = pd.Series(1., index=diseases)
print("Assigning that series to the diseases_by_country dict.")
diseases_by_country[country] = diseases
diseases_by_country = | pd.DataFrame(diseases_by_country) | pandas.DataFrame |
import pandas as pd
import numpy as np
data_path = "/home/clairegayral/Documents/openclassroom/data/P4/"
res_path = "/home/clairegayral/Documents/openclassroom/res/P4/"
from sklearn import preprocessing
from sklearn.impute import KNNImputer
###################
#### open data ####
###################
product_category_name_translation = pd.read_csv(data_path
+ "product_category_name_translation.csv")
sellers = pd.read_csv(data_path + "olist_sellers_dataset.csv")
products = pd.read_csv(data_path + "olist_products_dataset.csv")
orders = pd.read_csv(data_path + "olist_orders_dataset.csv")
order_reviews = pd.read_csv(data_path + "olist_order_reviews_dataset.csv")
order_payments = pd.read_csv(data_path + "olist_order_payments_dataset.csv")
order_items = pd.read_csv(data_path + "olist_order_items_dataset.csv")
geolocation = pd.read_csv(data_path + "olist_geolocation_dataset.csv")
customers = | pd.read_csv(data_path + "olist_customers_dataset.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Copyright (c) German Cancer Research Center,
Division of Medical Image Computing.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: <NAME>
Mail: <EMAIL>
"""
import numpy as np
import pandas as pd
import noisyFeaturesArtificalDataset as nfad
no_of_samples=150
meaningfull_variables=20
overall_variables=4000
oversampling_factor=6
data=nfad.generate_two_class_problem_with_unimportant(no_of_samples,
meaningfull_variables,
overall_variables-meaningfull_variables,
[-.5,.5],[-.5,.5],[0.3,2],[0.3,2],[0.3,2],
seed=21)
data=nfad.generate_validation_set(1000,data)
train_data=np.concatenate((data.samples_A, data.samples_B))
noise_data=nfad.draw_samples_from_distribution(np.zeros(train_data.shape[1]),data.stddev_noise,1)
train_data=train_data+noise_data
data.noise_samples=train_data
data.validation_y=data.validation_y.reshape([-1,1])
data.noise_labels=data.noise_labels.reshape([-1,1])
df_noisefree_training=pd.DataFrame(nfad.add_gaussian_noise_to_samples(data.samples, None, data.stddev_noise))
df_noisefree_training['Y']=pd.Series(data.noise_labels.reshape([-1]))
df_noisefree_training['Groups']=pd.Series(range(data.noise_labels.shape[0]))
df_gwn_training=pd.DataFrame(data.noise_samples)
df_gwn_training['Y']=pd.Series(data.noise_labels.reshape([-1])*2-1)
df_gwn_training['Groups']=pd.Series(range(data.noise_labels.shape[0]))
df_gwn_validation= | pd.DataFrame(data.validation_x) | pandas.DataFrame |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` function and operator implementations in SDC
| Also contains the Numba internal operators which are required for Series type handling
"""
import numba
import numpy
import operator
import pandas
import math
import sys
from numba.errors import TypingError
from numba.extending import overload, overload_method, overload_attribute
from numba.typing import signature
from numba.extending import intrinsic
from numba import (types, numpy_support, cgutils)
from numba.typed import Dict
from numba import prange
import sdc
import sdc.datatypes.common_functions as common_functions
from sdc.datatypes.common_functions import (TypeChecker, check_index_is_numeric, find_common_dtype_from_numpy_dtypes,
sdc_join_series_indexes)
from sdc.datatypes.hpat_pandas_series_rolling_types import _hpat_pandas_series_rolling_init
from sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType
from sdc.datatypes.hpat_pandas_getitem_types import SeriesGetitemAccessorType
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (StringArrayType, string_array_type, str_arr_is_na, str_arr_set_na,
num_total_chars, pre_alloc_string_array, cp_str_list_to_array)
from sdc.utils import to_array, sdc_overload, sdc_overload_method, sdc_overload_attribute
from sdc.datatypes import hpat_pandas_series_autogenerated
@sdc_overload(operator.getitem)
def hpat_pandas_series_accessor_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesGetitemAccessorType):
return None
accessor = self.accessor.literal_value
if accessor == 'iloc':
if isinstance(idx, (types.List, types.Array, types.SliceType)):
def hpat_pandas_series_iloc_list_slice_impl(self, idx):
result_data = self._series._data[idx]
result_index = self._series.index[idx]
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_iloc_list_slice_impl
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iloc_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iloc_impl
def hpat_pandas_series_iloc_callable_impl(self, idx):
index = numpy.asarray(list(map(idx, self._series._data)))
return pandas.Series(self._series._data[index], self._series.index[index], self._series._name)
return hpat_pandas_series_iloc_callable_impl
raise TypingError('{} The index must be an Integer, Slice or List of Integer or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'iat':
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iat_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iat_impl
raise TypingError('{} The index must be a Integer. Given: {}'.format(_func_name, idx))
if accessor == 'loc':
# Note: loc returns a Series
# Note: index 0 in slice not supported
# Note: loc with a slice or a callable over String values is not implemented
index_is_none = (self.series.index is None or
isinstance(self.series.index, numba.types.misc.NoneType))
if isinstance(idx, types.SliceType) and index_is_none:
def hpat_pandas_series_loc_slice_noidx_impl(self, idx):
max_slice = sys.maxsize
start = idx.start
stop = idx.stop
if idx.stop == max_slice:
stop = max_slice - 1
result_data = self._series._data[start:stop+1]
result_index = numpy.arange(start, stop + 1)
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_loc_slice_noidx_impl
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_loc_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._series._data[mask], index[mask], self._series._name)
return hpat_pandas_series_loc_impl
raise TypingError('{} The index must be an Number, Slice, String, List, Array or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'at':
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_at_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return self._series._data[mask]
return hpat_pandas_series_at_impl
raise TypingError('{} The index must be a Number or String. Given: {}'.format(_func_name, idx))
raise TypingError('{} Unknown accessor. Only "loc", "iloc", "at", "iat" are supported.\
Given: {}'.format(_func_name, accessor))
@sdc_overload(operator.getitem)
def hpat_pandas_series_getitem(self, idx):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.get
Limitations
-----------
Supported ``key`` can be one of the following:
- Integer scalar, e.g. :obj:`series[0]`
- A slice, e.g. :obj:`series[2:5]`
- Another series
Examples
--------
.. literalinclude:: ../../../examples/series_getitem.py
:language: python
:lines: 27-
:caption: Getting Pandas Series elements
:name: ex_series_getitem
.. command-output:: python ./series_getitem.py
:cwd: ../../../examples
.. todo:: Fix SDC behavior and add the expected output of the > python ./series_getitem.py to the docstring
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesType):
return None
# Note: getitem returns a Series
index_is_none = isinstance(self.index, numba.types.misc.NoneType)
index_is_none_or_numeric = index_is_none or (self.index and isinstance(self.index.dtype, types.Number))
index_is_string = not index_is_none and isinstance(self.index.dtype, (types.UnicodeType, types.StringLiteral))
if (
isinstance(idx, types.Number) and index_is_none_or_numeric or
(isinstance(idx, (types.UnicodeType, types.StringLiteral)) and index_is_string)
):
def hpat_pandas_series_getitem_index_impl(self, idx):
index = self.index
mask = numpy.empty(len(self._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._data[mask], index[mask], self._name)
return hpat_pandas_series_getitem_index_impl
if (isinstance(idx, types.Integer) and index_is_string):
def hpat_pandas_series_idx_impl(self, idx):
return self._data[idx]
return hpat_pandas_series_idx_impl
if isinstance(idx, types.SliceType):
# Note: returning a slice of str values is not implemented
def hpat_pandas_series_getitem_idx_slice_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_slice_impl
if (
isinstance(idx, (types.List, types.Array)) and
isinstance(idx.dtype, (types.Boolean, bool))
):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (index_is_none and isinstance(idx, SeriesType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
index = numpy.arange(len(self._data))
if (index != idx.index).sum() == 0:
return pandas.Series(self._data[idx._data], index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
res = numpy.copy(self._data[:len(idx._data)])
index = numpy.arange(len(self._data))
for i in numba.prange(len(res)):
for j in numba.prange(len(index)):
if j == idx._data[i]:
res[i] = self._data[j]
return pandas.Series(res, index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (isinstance(idx, SeriesType) and not isinstance(self.index, types.NoneType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
# Series with str index not implemented
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
if (self._index != idx._index).sum() == 0:
return pandas.Series(self._data[idx._data], self._index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_series_impl
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
index = self.index
data = self._data
size = len(index)
data_res = []
index_res = []
for value in idx._data:
mask = numpy.zeros(shape=size, dtype=numpy.bool_)
for i in numba.prange(size):
mask[i] = index[i] == value
data_res.extend(data[mask])
index_res.extend(index[mask])
return pandas.Series(data=data_res, index=index_res, name=self._name)
return hpat_pandas_series_getitem_idx_series_impl
raise TypingError('{} The index must be an Number, Slice, String, Boolean Array or a Series.\
Given: {}'.format(_func_name, idx))
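# Illustrative usage (an assumption, not part of the SDC sources): the overload above
# is exercised from user code compiled with Numba once SDC has registered its typing
# extensions, e.g.:
#
#     from numba import njit
#
#     @njit
#     def head3(series):
#         return series[:3]   # resolves to the slice branch of the overload above
#
#     head3(pandas.Series([5, 4, 3, 2, 1]))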
@sdc_overload(operator.setitem)
def hpat_pandas_series_setitem(self, idx, value):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.__setitem__
Examples
--------
.. literalinclude:: ../../../examples/series_setitem_int.py
:language: python
:lines: 27-
:caption: Setting Pandas Series elements
:name: ex_series_setitem
.. code-block:: console
> python ./series_setitem_int.py
0 0
1 4
2 3
3 2
4 1
dtype: int64
> python ./series_setitem_slice.py
0 5
1 4
2 0
3 0
4 0
dtype: int64
> python ./series_setitem_series.py
0 5
1 0
2 3
3 0
4 1
dtype: int64
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.set` implementation
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_setitem*
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
value: :object
input value
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
ty_checker = TypeChecker('Operator setitem.')
ty_checker.check(self, SeriesType)
if not (isinstance(idx, (types.Integer, types.SliceType, SeriesType))):
ty_checker.raise_exc(idx, 'int, Slice, Series', 'idx')
if not((isinstance(value, SeriesType) and isinstance(value.dtype, self.dtype)) or \
isinstance(value, type(self.dtype))):
ty_checker.raise_exc(value, self.dtype, 'value')
if isinstance(idx, types.Integer) or isinstance(idx, types.SliceType):
def hpat_pandas_series_setitem_idx_integer_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_value
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_slice
"""
self._data[idx] = value
return self
return hpat_pandas_series_setitem_idx_integer_impl
if isinstance(idx, SeriesType):
def hpat_pandas_series_setitem_idx_series_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_series
"""
super_index = idx._data
self._data[super_index] = value
return self
return hpat_pandas_series_setitem_idx_series_impl
@sdc_overload_attribute(SeriesType, 'iloc')
def hpat_pandas_series_iloc(self):
"""
Pandas Series method :meth:`pandas.Series.iloc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iloc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iloc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iloc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iloc')
return hpat_pandas_series_iloc_impl
@sdc_overload_attribute(SeriesType, 'loc')
def hpat_pandas_series_loc(self):
"""
Pandas Series method :meth:`pandas.Series.loc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_loc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute loc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_loc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'loc')
return hpat_pandas_series_loc_impl
@sdc_overload_attribute(SeriesType, 'iat')
def hpat_pandas_series_iat(self):
"""
Pandas Series method :meth:`pandas.Series.iat` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iat*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iat().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iat_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iat')
return hpat_pandas_series_iat_impl
@sdc_overload_attribute(SeriesType, 'at')
def hpat_pandas_series_at(self):
"""
Pandas Series method :meth:`pandas.Series.at` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_at*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute at().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_at_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'at')
return hpat_pandas_series_at_impl
@sdc_overload_method(SeriesType, 'nsmallest')
def hpat_pandas_series_nsmallest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nsmallest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nsmallest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nsmallest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nsmallest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nsmallest(). Unsupported parameter. Given 'keep' != 'first'")
# mergesort is used for stable sorting of repeated values
indices = self._data.argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nsmallest_impl
@sdc_overload_method(SeriesType, 'nlargest')
def hpat_pandas_series_nlargest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nlargest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nlargest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nlargest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nlargest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nlargest(). Unsupported parameter. Given 'keep' != 'first'")
# data: [0, 1, -1, 1, 0] -> [1, 1, 0, 0, -1]
# index: [0, 1, 2, 3, 4] -> [1, 3, 0, 4, 2] (not [3, 1, 4, 0, 2])
# subtract 1 to ensure reverse ordering at boundaries
indices = (-self._data - 1).argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nlargest_impl
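# Worked example (illustrative, not from the SDC sources) of the ordering trick in
# hpat_pandas_series_nlargest_impl for data = [0, 1, -1, 1, 0]:
#   -data - 1                 -> [-1, -2, 0, -2, -1]
#   argsort(kind='mergesort') -> [1, 3, 0, 4, 2]
# i.e. indices ordered by descending value, with ties kept in appearance order as
# keep='first' requires; the extra -1 presumably guards boundary values (e.g. 0,
# which unary minus alone would leave in place for unsigned dtypes).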
@sdc_overload_attribute(SeriesType, 'shape')
def hpat_pandas_series_shape(self):
"""
Pandas Series attribute :attr:`pandas.Series.shape` implementation
**Algorithm**: result = series.shape
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shape1
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:obj:`tuple`
a tuple of the shape of the underlying data
"""
_func_name = 'Attribute shape.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl
@sdc_overload_method(SeriesType, 'std')
def hpat_pandas_series_std(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.std` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method std().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_std_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
var = self.var(axis=axis, skipna=skipna, level=level, ddof=ddof, numeric_only=numeric_only)
return var ** 0.5
return hpat_pandas_series_std_impl
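# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It checks, in plain interpreted pandas, the relationship
# the overload above relies on: std is the square root of var, so both share the same
# skipna and ddof handling.
def _example_std_from_var_sketch():
    import numpy
    import pandas

    s = pandas.Series([1.0, 2.0, numpy.nan, 4.0])
    assert abs(s.std() - s.var() ** 0.5) < 1e-12
    return s.std()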
@sdc_overload_attribute(SeriesType, 'values')
def hpat_pandas_series_values(self):
"""
Pandas Series attribute 'values' implementation.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.values.html#pandas.Series.values
Algorithm: result = series.values
Where:
series: pandas.series
result: pandas.series as ndarray
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_values
"""
_func_name = 'Attribute values.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_values_impl(self):
return self._data
return hpat_pandas_series_values_impl
@sdc_overload_method(SeriesType, 'value_counts')
def hpat_pandas_series_value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.value_counts
Examples
--------
.. literalinclude:: ../../../examples/series/series_value_counts.py
:language: python
:lines: 27-
:caption: Getting the number of values excluding NaNs
:name: ex_series_value_counts
.. command-output:: python ./series/series_value_counts.py
:cwd: ../../../examples
.. note::
Parameters bins and dropna for string data are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.count <pandas.Series.count>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.value_counts` implementation.
Note: Elements with the same count might appear in result in a different order than in Pandas
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_value_counts*
Parameters
-----------
self: :obj:`pandas.Series`
input series
normalize: :obj:`boolean`, default False
If True then the object returned will contain the relative frequencies of the unique values
sort: :obj:`boolean`, default True
Sort by frequencies
ascending: :obj:`boolean`, default False
Sort in ascending order
bins: :obj:`integer`, default None
*unsupported*
dropna: :obj:`boolean`, default True
Skip counts of NaN
Returns
-------
:returns :obj:`pandas.Series`
"""
_func_name = 'Method value_counts().'
ty_checker = TypeChecker('Method value_counts().')
ty_checker.check(self, SeriesType)
if not isinstance(normalize, (types.Omitted, types.Boolean, bool)) and normalize is True:
ty_checker.raise_exc(normalize, 'boolean', 'normalize')
if not isinstance(sort, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(sort, 'boolean', 'sort')
if not isinstance(ascending, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(ascending, 'boolean', 'ascending')
if not isinstance(bins, (types.Omitted, types.NoneType)) and bins is not None:
ty_checker.raise_exc(bins, 'None', 'bins')
if not isinstance(dropna, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(dropna, 'boolean', 'dropna')
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_value_counts_str_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=types.unicode_type,
value_type=types.intp
)
nan_counts = 0
for i, value in enumerate(self._data):
if str_arr_is_na(self._data, i):
if not dropna:
nan_counts += 1
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
need_add_nan_count = not dropna and nan_counts
values = [key for key in value_counts_dict]
counts_as_list = [value_counts_dict[key] for key in value_counts_dict.keys()]
values_len = len(values)
if need_add_nan_count:
# append a separate empty string for NaN elements
values_len += 1
values.append('')
counts_as_list.append(nan_counts)
counts = numpy.asarray(counts_as_list, dtype=numpy.intp)
indexes_order = numpy.arange(values_len)
if sort:
indexes_order = counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
counts_sorted = numpy.take(counts, indexes_order)
values_sorted_by_count = [values[i] for i in indexes_order]
# allocate the result index as a StringArray and copy values to it
index_string_lengths = numpy.asarray([len(s) for s in values_sorted_by_count])
index_total_chars = numpy.sum(index_string_lengths)
result_index = pre_alloc_string_array(len(values_sorted_by_count), index_total_chars)
cp_str_list_to_array(result_index, values_sorted_by_count)
if need_add_nan_count:
# set null bit for StringArray element corresponding to NaN element (was added as last in values)
index_previous_nan_pos = values_len - 1
for i in numpy.arange(values_len):
if indexes_order[i] == index_previous_nan_pos:
str_arr_set_na(result_index, i)
break
return pandas.Series(counts_sorted, index=result_index, name=self._name)
return hpat_pandas_series_value_counts_str_impl
elif isinstance(self.dtype, types.Number):
series_dtype = self.dtype
def hpat_pandas_series_value_counts_number_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=series_dtype,
value_type=types.intp
)
zero_counts = 0
is_zero_found = False
for value in self._data:
if (dropna and numpy.isnan(value)):
continue
# Pandas hash-based value_count_float64 function doesn't distinguish between
# positive and negative zeros, hence we count zero values separately and store
# as a key the first zero value found in the Series
if not value:
zero_counts += 1
if not is_zero_found:
zero_value = value
is_zero_found = True
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
if zero_counts:
value_counts_dict[zero_value] = zero_counts
unique_values = numpy.asarray(
list(value_counts_dict),
dtype=self._data.dtype
)
value_counts = numpy.asarray(
[value_counts_dict[key] for key in value_counts_dict],
dtype=numpy.intp
)
indexes_order = numpy.arange(len(value_counts))
if sort:
indexes_order = value_counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
sorted_unique_values = numpy.take(unique_values, indexes_order)
sorted_value_counts = numpy.take(value_counts, indexes_order)
return pandas.Series(sorted_value_counts, index=sorted_unique_values, name=self._name)
return hpat_pandas_series_value_counts_number_impl
return None
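# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It shows, in plain interpreted pandas, the semantics the
# two compiled branches above reproduce: counts sorted by frequency (descending by
# default), an optional ascending order, and a dedicated NaN bucket when dropna=False
# (modelled in the string branch by an extra NA element appended to the result index).
def _example_value_counts_sketch():
    import numpy
    import pandas

    s = pandas.Series([3.0, 1.0, 3.0, numpy.nan, 1.0, 3.0])
    by_frequency = s.value_counts()                  # 3.0 -> 3, 1.0 -> 2
    ascending = s.value_counts(ascending=True)       # 1.0 -> 2, 3.0 -> 3
    with_nan_bucket = s.value_counts(dropna=False)   # adds NaN -> 1
    return by_frequency, ascending, with_nan_bucket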
@sdc_overload_method(SeriesType, 'var')
def hpat_pandas_series_var(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.var` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method var().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_var_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
if skipna is None:
skipna = True
if skipna:
valuable_length = len(self._data) - numpy.sum(numpy.isnan(self._data))
if valuable_length <= ddof:
return numpy.nan
return numpy.nanvar(self._data) * valuable_length / (valuable_length - ddof)
if len(self._data) <= ddof:
return numpy.nan
return self._data.var() * len(self._data) / (len(self._data) - ddof)
return hpat_pandas_series_var_impl
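# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It verifies, with plain numpy, the rescaling used in the
# skipna branch above: numpy.nanvar is the population variance (ddof=0), and
# multiplying it by n / (n - ddof) over the non-NaN count gives the sample variance
# that pandas reports with the default ddof=1.
def _example_var_ddof_sketch():
    import numpy

    data = numpy.array([1.0, 2.0, numpy.nan, 4.0])
    ddof = 1
    valid = len(data) - numpy.sum(numpy.isnan(data))   # 3 non-NaN values
    rescaled = numpy.nanvar(data) * valid / (valid - ddof)
    assert abs(rescaled - numpy.nanvar(data, ddof=1)) < 1e-12
    return rescaled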
@sdc_overload_attribute(SeriesType, 'index')
def hpat_pandas_series_index(self):
"""
Pandas Series attribute :attr:`pandas.Series.index` implementation
**Algorithm**: result = series.index
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index1
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index2
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
the index of the Series
"""
_func_name = 'Attribute index.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_index_none_impl(self):
return numpy.arange(len(self._data))
return hpat_pandas_series_index_none_impl
else:
def hpat_pandas_series_index_impl(self):
return self._index
return hpat_pandas_series_index_impl
@sdc_overload_method(SeriesType, 'rolling')
def hpat_pandas_series_rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.rolling
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_min.py
:language: python
:lines: 27-
:caption: Calculate the rolling minimum.
:name: ex_series_rolling
.. command-output:: python ./series/rolling/series_rolling_min.py
:cwd: ../../../examples
.. todo:: Add support of parameters ``center``, ``win_type``, ``on``, ``axis`` and ``closed``
.. seealso::
:ref:`expanding <pandas.Series.expanding>`
Provides expanding transformations.
:ref:`ewm <pandas.Series.ewm>`
Provides exponential weighted functions.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series attribute :attr:`pandas.Series.rolling` implementation
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling
Parameters
----------
series: :obj:`pandas.Series`
Input Series.
window: :obj:`int` or :obj:`offset`
Size of the moving window.
min_periods: :obj:`int`
Minimum number of observations in window required to have a value.
center: :obj:`bool`
Set the labels at the center of the window.
*unsupported*
win_type: :obj:`str`
Provide a window type.
*unsupported*
on: :obj:`str`
Column on which to calculate the rolling window.
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
closed: :obj:`str`
Make the interval closed on the ‘right’, ‘left’, ‘both’ or ‘neither’ endpoints.
*unsupported*
Returns
-------
:class:`pandas.Series.rolling`
Output class to manipulate with input data.
"""
ty_checker = TypeChecker('Method rolling().')
ty_checker.check(self, SeriesType)
if not isinstance(window, types.Integer):
ty_checker.raise_exc(window, 'int', 'window')
minp_accepted = (types.Omitted, types.NoneType, types.Integer)
if not isinstance(min_periods, minp_accepted) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'None, int', 'min_periods')
center_accepted = (types.Omitted, types.Boolean)
if not isinstance(center, center_accepted) and center is not False:
ty_checker.raise_exc(center, 'bool', 'center')
str_types = (types.Omitted, types.NoneType, types.StringLiteral, types.UnicodeType)
if not isinstance(win_type, str_types) and win_type is not None:
ty_checker.raise_exc(win_type, 'str', 'win_type')
if not isinstance(on, str_types) and on is not None:
ty_checker.raise_exc(on, 'str', 'on')
axis_accepted = (types.Omitted, types.Integer, types.StringLiteral, types.UnicodeType)
if not isinstance(axis, axis_accepted) and axis != 0:
ty_checker.raise_exc(axis, 'int, str', 'axis')
if not isinstance(closed, str_types) and closed is not None:
ty_checker.raise_exc(closed, 'str', 'closed')
nan_minp = isinstance(min_periods, (types.Omitted, types.NoneType)) or min_periods is None
def hpat_pandas_series_rolling_impl(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
if window < 0:
raise ValueError('window must be non-negative')
if nan_minp == True: # noqa
minp = window
else:
minp = min_periods
if minp < 0:
raise ValueError('min_periods must be >= 0')
if minp > window:
raise ValueError('min_periods must be <= window')
if center != False: # noqa
raise ValueError('Method rolling(). The object center\n expected: False')
if win_type is not None:
raise ValueError('Method rolling(). The object win_type\n expected: None')
if on is not None:
raise ValueError('Method rolling(). The object on\n expected: None')
if axis != 0:
raise ValueError('Method rolling(). The object axis\n expected: 0')
if closed is not None:
raise ValueError('Method rolling(). The object closed\n expected: None')
return _hpat_pandas_series_rolling_init(self, window, minp, center,
win_type, on, axis, closed)
return hpat_pandas_series_rolling_impl
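# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It shows, in plain interpreted pandas, the min_periods
# default the implementation above applies: when min_periods is None it falls back to
# the window size, so leading windows that are not yet full produce NaN.
def _example_rolling_min_periods_sketch():
    import pandas

    s = pandas.Series([5.0, 3.0, 7.0, 1.0])
    full_windows_only = s.rolling(window=3).min()                   # NaN, NaN, 3.0, 1.0
    partial_windows_ok = s.rolling(window=3, min_periods=1).min()   # 5.0, 3.0, 3.0, 1.0
    return full_windows_only, partial_windows_ok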
@sdc_overload_attribute(SeriesType, 'size')
def hpat_pandas_series_size(self):
"""
Pandas Series attribute :attr:`pandas.Series.size` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_size
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:obj:`int`
Return the number of elements in the underlying data.
"""
_func_name = 'Attribute size.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_size_impl(self):
return len(self._data)
return hpat_pandas_series_size_impl
@sdc_overload_attribute(SeriesType, 'str')
def hpat_pandas_series_str(self):
"""
Pandas Series attribute :attr:`pandas.Series.str` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_get
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.core.strings.StringMethods`
Output class to manipulate with input data.
"""
_func_name = 'Attribute str.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.List, types.UnicodeType)):
msg = '{} Can only use .str accessor with string values. Given: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
def hpat_pandas_series_str_impl(self):
return pandas.core.strings.StringMethods(self)
return hpat_pandas_series_str_impl
@sdc_overload_attribute(SeriesType, 'ndim')
def hpat_pandas_series_ndim(self):
"""
Pandas Series attribute :attr:`pandas.Series.ndim` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_getattr_ndim
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`int`
Number of dimensions of the underlying data, by definition 1
"""
_func_name = 'Attribute ndim.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_ndim_impl(self):
return 1
return hpat_pandas_series_ndim_impl
@sdc_overload_attribute(SeriesType, 'T')
def hpat_pandas_series_T(self):
"""
Pandas Series attribute :attr:`pandas.Series.T` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_getattr_T
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`numpy.ndarray`
An array representing the underlying data
"""
_func_name = 'Attribute T.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_T_impl(self):
return self._data
return hpat_pandas_series_T_impl
@sdc_overload(len)
def hpat_pandas_series_len(self):
"""
Pandas Series operator :func:`len` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_len
Parameters
----------
series: :class:`pandas.Series`
Returns
-------
:obj:`int`
number of items in the object
"""
_func_name = 'Operator len().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_len_impl(self):
return len(self._data)
return hpat_pandas_series_len_impl
@sdc_overload_method(SeriesType, 'astype')
def hpat_pandas_series_astype(self, dtype, copy=True, errors='raise'):
"""
Pandas Series method :meth:`pandas.Series.astype` implementation.
Cast a pandas object to a specified dtype dtype
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_astype*
Parameters
-----------
dtype : :obj:`numpy.dtype` or :obj:`dict`
Use a numpy.dtype or Python type to cast entire pandas object to the same type.
Alternatively, use {col: dtype, …}, where col is a column label and dtype is a numpy.dtype
or Python type to cast one or more of the DataFrame’s columns to column-specific types.
copy : :obj:`bool`, default :obj:`True`
Return a copy when True
Currently copy=False is not supported
errors : :obj:`str`, default :obj:`'raise'`
Control raising of exceptions on invalid data for provided dtype.
* raise : allow exceptions to be raised
* ignore : suppress exceptions. On error return original object
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` Cast a :obj:`pandas.Series` to a specified dtype dtype
"""
_func_name = 'Method astype().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(copy, (types.Omitted, bool, types.Boolean)):
raise TypingError('{} The object must be a boolean. Given copy: {}'.format(_func_name, copy))
if (not isinstance(errors, (types.Omitted, str, types.UnicodeType, types.StringLiteral)) and
errors not in ('raise', 'ignore')):
raise TypingError('{} The object must be a string literal. Given errors: {}'.format(_func_name, errors))
# Return StringArray for astype(str) or astype('str')
def hpat_pandas_series_astype_to_str_impl(self, dtype, copy=True, errors='raise'):
num_chars = 0
arr_len = len(self._data)
# Get total chars for new array
for i in numba.parfor.internal_prange(arr_len):
item = self._data[i]
num_chars += len(str(item)) # TODO: check NA
data = sdc.str_arr_ext.pre_alloc_string_array(arr_len, num_chars)
for i in numba.parfor.internal_prange(arr_len):
item = self._data[i]
data[i] = str(item) # TODO: check NA
return pandas.Series(data, self._index, self._name)
# Return npytypes.Array from npytypes.Array for astype(types.functions.NumberClass), example - astype(np.int64)
def hpat_pandas_series_astype_numba_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data.astype(dtype), self._index, self._name)
# Return npytypes.Array from npytypes.Array for astype(types.StringLiteral), example - astype('int64')
def hpat_pandas_series_astype_literal_type_numba_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data.astype(numpy.dtype(dtype)), self._index, self._name)
# Return self
def hpat_pandas_series_astype_no_modify_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data, self._index, self._name)
if ((isinstance(dtype, types.Function) and dtype.typing_key == str)
or (isinstance(dtype, types.StringLiteral) and dtype.literal_value == 'str')):
return hpat_pandas_series_astype_to_str_impl
# Needs Numba astype impl support converting unicode_type to NumberClass and other types
if isinstance(self.data, StringArrayType):
if isinstance(dtype, types.functions.NumberClass) and errors == 'raise':
raise TypingError(f'Needs Numba astype impl support converting unicode_type to {dtype}')
if isinstance(dtype, types.StringLiteral) and errors == 'raise':
try:
literal_value = numpy.dtype(dtype.literal_value)
except:
pass # Will raise the exception later
else:
raise TypingError(f'Needs Numba astype impl support converting unicode_type to {dtype.literal_value}')
if isinstance(self.data, types.npytypes.Array) and isinstance(dtype, types.functions.NumberClass):
return hpat_pandas_series_astype_numba_impl
if isinstance(self.data, types.npytypes.Array) and isinstance(dtype, types.StringLiteral):
try:
literal_value = numpy.dtype(dtype.literal_value)
except:
pass # Will raise the exception later
else:
return hpat_pandas_series_astype_literal_type_numba_impl
# Raise error if dtype is not supported
if errors == 'raise':
raise TypingError(f'{_func_name} The object must be a supported type. Given dtype: {dtype}')
else:
return hpat_pandas_series_astype_no_modify_impl
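# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It shows, in plain interpreted pandas, the three cast
# shapes handled by the branches above: astype(str) stringifies every element (the
# compiled version pre-allocates a string array of the exact total character count),
# while a numpy dtype or a dtype name casts the underlying array.
def _example_astype_sketch():
    import numpy
    import pandas

    s = pandas.Series([1.5, 2.5])
    as_text = s.astype(str)             # '1.5', '2.5'
    as_int64 = s.astype(numpy.int64)    # 1, 2 (truncation toward zero)
    as_named_dtype = s.astype('int64')  # same cast, resolved via numpy.dtype('int64')
    return as_text, as_int64, as_named_dtype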
@sdc_overload_method(SeriesType, 'shift')
def hpat_pandas_series_shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Pandas Series method :meth:`pandas.Series.shift` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_full
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_fill_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
periods: :obj:`int`
Number of periods to shift. Can be positive or negative.
freq: :obj:`DateOffset`, :obj:`tseries.offsets`, :obj:`timedelta`, :obj:`str`
Offset to use from the tseries module or time rule (e.g. ‘EOM’).
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
fill_value : :obj:`int`, :obj:`float`
The scalar value to use for newly introduced missing values.
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method shift().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(fill_value, (types.Omitted, types.Number, types.NoneType)) and fill_value is not None:
raise TypingError('{} The object must be a number. Given fill_value: {}'.format(_func_name, fill_value))
if not isinstance(freq, (types.Omitted, types.NoneType)) and freq is not None:
raise TypingError('{} Unsupported parameters. Given freq: {}'.format(_func_name, freq))
if not isinstance(axis, (types.Omitted, int, types.Integer)) and axis != 0:
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
fill_is_default = isinstance(fill_value, (types.Omitted, types.NoneType)) or fill_value is None
series_np_dtype = [numpy_support.as_dtype(self.data.dtype)]
fill_np_dtype = [numpy.float64 if fill_is_default else numpy_support.as_dtype(fill_value)]
fill_dtype = types.float64 if fill_is_default else fill_value
common_dtype = find_common_dtype_from_numpy_dtypes([], [self.data.dtype, fill_dtype])
if fill_is_default:
def hpat_pandas_series_shift_impl(self, periods=1, freq=None, axis=0, fill_value=None):
if axis != 0:
raise TypingError('Method shift(). Unsupported parameters. Given axis != 0')
arr = numpy.empty(shape=len(self._data), dtype=common_dtype)
if periods > 0:
arr[:periods] = numpy.nan
arr[periods:] = self._data[:-periods]
elif periods < 0:
arr[periods:] = numpy.nan
arr[:periods] = self._data[-periods:]
else:
arr[:] = self._data
return pandas.Series(data=arr, index=self._index, name=self._name)
return hpat_pandas_series_shift_impl
def hpat_pandas_series_shift_impl(self, periods=1, freq=None, axis=0, fill_value=None):
if axis != 0:
raise TypingError('Method shift(). Unsupported parameters. Given axis != 0')
arr = numpy.empty(len(self._data), dtype=common_dtype)
if periods > 0:
arr[:periods] = fill_value
arr[periods:] = self._data[:-periods]
elif periods < 0:
arr[periods:] = fill_value
arr[:periods] = self._data[-periods:]
else:
arr[:] = self._data
return pandas.Series(data=arr, index=self._index, name=self._name)
return hpat_pandas_series_shift_impl
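# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It shows, in plain interpreted pandas, what the two
# compiled branches above produce: without fill_value the vacated positions become NaN
# (promoting the result dtype to float), with fill_value they take that scalar, and
# negative periods shift in the opposite direction.
def _example_shift_sketch():
    import pandas

    s = pandas.Series([1, 2, 3, 4])
    nan_filled = s.shift(periods=1)                   # NaN, 1.0, 2.0, 3.0
    zero_filled = s.shift(periods=-2, fill_value=0)   # 3, 4, 0, 0
    return nan_filled, zero_filled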
@sdc_overload_method(SeriesType, 'isin')
def hpat_pandas_series_isin(self, values):
"""
Pandas Series method :meth:`pandas.Series.isin` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isin_list1
Parameters
-----------
values : :obj:`list` or :obj:`set` object
specifies values to look for in the series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object indicating if each element of self is in values
"""
_func_name = 'Method isin().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(values, (types.Set, types.List)):
raise TypingError(
'{} The argument must be set or list-like object. Given values: {}'.format(_func_name, values))
def hpat_pandas_series_isin_impl(self, values):
# TODO: replace with below line when Numba supports np.isin in nopython mode
# return pandas.Series(np.isin(self._data, values))
return pandas.Series(data=[(x in values) for x in self._data], index=self._index, name=self._name)
return hpat_pandas_series_isin_impl
@sdc_overload_method(SeriesType, 'append')
def hpat_pandas_series_append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Pandas Series method :meth:`pandas.Series.append` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_append*
Parameters
-----------
self: :obj:`pandas.Series`
input series
to_append : :obj:`pandas.Series` object or :obj:`list` or :obj:`set`
Series (or list or tuple of Series) to append with self
ignore_index: :obj:`bool`, default False
If True, do not use the index labels.
Supported as literal value only
verify_integrity: :obj:`bool`, default False
If True, raise Exception on creating index with duplicates.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
Concatenated Series
"""
_func_name = 'Method append().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not (isinstance(to_append, SeriesType)
or (isinstance(to_append, (types.UniTuple, types.List)) and isinstance(to_append.dtype, SeriesType))):
raise TypingError(
'{} The argument must be a pandas.series or list/tuple of pandas.series. \
Given to_append: {}'.format(_func_name, to_append))
# currently we will always raise this in the end, i.e. if no impl was found
# TODO: find a way to stop compilation early and not proceed with unliteral step
if not (isinstance(ignore_index, types.Literal) and isinstance(ignore_index, types.Boolean)
or isinstance(ignore_index, types.Omitted)
or ignore_index is False):
raise TypingError(
'{} The ignore_index must be a literal Boolean constant. Given: {}'.format(_func_name, ignore_index))
if not (verify_integrity is False or isinstance(verify_integrity, types.Omitted)):
raise TypingError(
'{} Unsupported parameters. Given verify_integrity: {}'.format(_func_name, verify_integrity))
# ignore_index value has to be known at compile time to select between implementations with different signatures
ignore_index_is_false = (common_functions.has_literal_value(ignore_index, False)
or common_functions.has_python_value(ignore_index, False)
or isinstance(ignore_index, types.Omitted))
to_append_is_series = isinstance(to_append, SeriesType)
if ignore_index_is_false:
def hpat_pandas_series_append_impl(self, to_append, ignore_index=False, verify_integrity=False):
if to_append_is_series == True: # noqa
new_data = common_functions.hpat_arrays_append(self._data, to_append._data)
new_index = common_functions.hpat_arrays_append(self.index, to_append.index)
else:
data_arrays_to_append = [series._data for series in to_append]
index_arrays_to_append = [series.index for series in to_append]
new_data = common_functions.hpat_arrays_append(self._data, data_arrays_to_append)
new_index = common_functions.hpat_arrays_append(self.index, index_arrays_to_append)
return pandas.Series(new_data, new_index)
return hpat_pandas_series_append_impl
else:
def hpat_pandas_series_append_ignore_index_impl(self, to_append, ignore_index=False, verify_integrity=False):
if to_append_is_series == True: # noqa
new_data = common_functions.hpat_arrays_append(self._data, to_append._data)
else:
arrays_to_append = [series._data for series in to_append]
new_data = common_functions.hpat_arrays_append(self._data, arrays_to_append)
return pandas.Series(new_data, None)
return hpat_pandas_series_append_ignore_index_impl
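# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It shows the two index behaviours the compiled branches
# above distinguish: by default the indexes are concatenated as-is (duplicates kept),
# while ignore_index=True produces a fresh positional index. pandas.concat is used
# here because Series.append was removed in pandas 2.0; the semantics are the same.
def _example_append_index_sketch():
    import pandas

    s1 = pandas.Series([1, 2])
    s2 = pandas.Series([3, 4])
    keep_labels = pandas.concat([s1, s2])                       # index 0, 1, 0, 1
    fresh_labels = pandas.concat([s1, s2], ignore_index=True)   # index 0, 1, 2, 3
    return keep_labels, fresh_labels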
@sdc_overload_method(SeriesType, 'copy')
def hpat_pandas_series_copy(self, deep=True):
"""
Pandas Series method :meth:`pandas.Series.copy` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_str1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_int1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_deep
Parameters
-----------
self: :class:`pandas.Series`
input arg
deep: :obj:`bool`, default :obj:`True`
Make a deep copy, including a copy of the data and the indices.
With deep=False neither the indices nor the data are copied.
[SDC limitations]:
- deep=False: shallow copy of index is not supported
Returns
-------
:obj:`pandas.Series` or :obj:`pandas.DataFrame`
Object type matches caller.
"""
ty_checker = TypeChecker('Method Series.copy().')
ty_checker.check(self, SeriesType)
if not isinstance(deep, (types.Omitted, types.Boolean)) and not deep:
ty_checker.raise_exc(deep, 'boolean', 'deep')
if isinstance(self.index, types.NoneType):
def hpat_pandas_series_copy_impl(self, deep=True):
if deep:
return pandas.Series(data=self._data.copy(), name=self._name)
else:
return pandas.Series(data=self._data, name=self._name)
return hpat_pandas_series_copy_impl
else:
def hpat_pandas_series_copy_impl(self, deep=True):
if deep:
return pandas.Series(data=self._data.copy(), index=self._index.copy(), name=self._name)
else:
# Shallow copy of index is not supported yet
return pandas.Series(data=self._data, index=self._index.copy(), name=self._name)
return hpat_pandas_series_copy_impl
@sdc_overload_method(SeriesType, 'corr')
def hpat_pandas_series_corr(self, other, method='pearson', min_periods=None):
"""
Pandas Series method :meth:`pandas.Series.corr` implementation.
Note: Unsupported mixed numeric and string data
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr_unsupported_dtype
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr_unsupported_period
Parameters
----------
self: :obj:`pandas.Series`
input series
other: :obj:`pandas.Series`
input series
method:
*unsupported
min_periods: :obj:`int`, default None
Returns
-------
:obj:`float`
returns :obj:`float` object
"""
ty_checker = TypeChecker('Method corr().')
ty_checker.check(self, SeriesType)
ty_checker.check(other, SeriesType)
if not isinstance(self.data.dtype, types.Number):
ty_checker.raise_exc(self.data, 'number', 'self.data')
if not isinstance(other.data.dtype, types.Number):
ty_checker.raise_exc(other.data, 'number', 'other.data')
if not isinstance(min_periods, (int, types.Integer, types.Omitted, types.NoneType)) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'int64', 'min_periods')
def hpat_pandas_series_corr_impl(self, other, method='pearson', min_periods=None):
if min_periods is None:
min_periods = 1
if len(self._data) == 0 or len(other._data) == 0:
return numpy.nan
self_arr = self._data[:min(len(self._data), len(other._data))]
other_arr = other._data[:min(len(self._data), len(other._data))]
invalid = numpy.isnan(self_arr) | numpy.isnan(other_arr)
if invalid.any():
self_arr = self_arr[~invalid]
other_arr = other_arr[~invalid]
if len(self_arr) < min_periods:
return numpy.nan
new_self = pandas.Series(self_arr)
new_other = pandas.Series(other_arr)
n = new_self.count()
ma = new_self.sum()
mb = new_other.sum()
a = n * (self_arr * other_arr).sum() - ma * mb
b1 = n * (self_arr * self_arr).sum() - ma * ma
b2 = n * (other_arr * other_arr).sum() - mb * mb
if b1 == 0 or b2 == 0:
return numpy.nan
return a / numpy.sqrt(b1 * b2)
return hpat_pandas_series_corr_impl
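# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It checks, with plain numpy and pandas, that the closed
# form used above is the standard computational formula for the Pearson coefficient:
#   r = (n*sum(xy) - sum(x)*sum(y))
#       / sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2))
def _example_corr_formula_sketch():
    import numpy
    import pandas

    x = numpy.array([1.0, 2.0, 3.0, 5.0])
    y = numpy.array([2.0, 1.0, 4.0, 6.0])
    n = len(x)
    a = n * (x * y).sum() - x.sum() * y.sum()
    b1 = n * (x * x).sum() - x.sum() ** 2
    b2 = n * (y * y).sum() - y.sum() ** 2
    r = a / numpy.sqrt(b1 * b2)
    assert abs(r - pandas.Series(x).corr(pandas.Series(y))) < 1e-9
    return r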
@sdc_overload_method(SeriesType, 'head')
def hpat_pandas_series_head(self, n=5):
"""
Pandas Series method :meth:`pandas.Series.head` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_head*
Parameters
-----------
n: :obj:`int`, default 5
input argument, default 5
Returns
-------
:obj:`pandas.Series`
returns: The first n rows of the caller object.
"""
_func_name = 'Method head().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(n, (types.Integer, types.Omitted)) and n != 5:
raise TypingError('{} The parameter must be an integer type. Given type n: {}'.format(_func_name, n))
if isinstance(self.index, types.NoneType):
def hpat_pandas_series_head_impl(self, n=5):
return pandas.Series(data=self._data[:n], name=self._name)
return hpat_pandas_series_head_impl
else:
def hpat_pandas_series_head_index_impl(self, n=5):
return pandas.Series(data=self._data[:n], index=self._index[:n], name=self._name)
return hpat_pandas_series_head_index_impl
@sdc_overload_method(SeriesType, 'groupby')
def hpat_pandas_series_groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
"""
Pandas Series method :meth:`pandas.Series.groupby` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_groupby_count
Parameters
-----------
self: :class:`pandas.Series`
input arg
by: :obj:`pandas.Series` object
Used to determine the groups for the groupby
axis:
*unsupported*
level:
*unsupported*
as_index:
*unsupported*
sort:
*unsupported*
group_keys:
*unsupported*
squeeze:
*unsupported*
observed:
*unsupported*
Returns
-------
:obj:`pandas.SeriesGroupBy`
returns :obj:`pandas.SeriesGroupBy` object
"""
_func_name = 'Method Series.groupby().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if by is None and axis is None:
raise TypingError("{} You have to supply one of 'by' or 'axis' parameters".format(_func_name))
if level is not None and not isinstance(level, (types.Integer, types.NoneType, types.Omitted)):
raise TypingError("{} 'level' must be an Integer. Given: {}".format(_func_name, level))
def hpat_pandas_series_groupby_impl(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
# TODO Needs to implement parameters value check
# if level is not None and (level < -1 or level > 0):
# raise ValueError("Method Series.groupby(). level > 0 or level < -1 only valid with MultiIndex")
return pandas.core.groupby.SeriesGroupBy(self)
return hpat_pandas_series_groupby_impl
@sdc_overload_method(SeriesType, 'isnull')
@sdc_overload_method(SeriesType, 'isna')
def hpat_pandas_series_isna(self):
"""
Pandas Series method :meth:`pandas.Series.isna` and :meth:`pandas.Series.isnull` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isna1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str_isna1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isnull1
Parameters
-----------
self : :obj:`pandas.Series` object
input argument
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method isna/isnull().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if isinstance(self.data.dtype, (types.Integer, types.Float)):
def hpat_pandas_series_isna_impl(self):
return pandas.Series(data=numpy.isnan(self._data), index=self._index, name=self._name)
return hpat_pandas_series_isna_impl
if isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_isna_impl(self):
result = numpy.empty(len(self._data), numpy.bool_)
byte_size = 8
# iterate over bits in StringArrayType null_bitmap and fill array indicating if array's element are NaN
for i in range(len(self._data)):
bmap_idx = i // byte_size
bit_idx = i % byte_size
bmap = self._data.null_bitmap[bmap_idx]
bit_value = (bmap >> bit_idx) & 1
result[i] = bit_value == 0
return pandas.Series(result, index=self._index, name=self._name)
return hpat_pandas_series_isna_impl
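# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It decodes a null bitmap with the same bit arithmetic as
# the string branch above: each byte covers 8 elements, and a cleared bit marks the
# corresponding element as NA.
def _example_null_bitmap_sketch():
    def decode_isna(null_bitmap, length):
        result = []
        for i in range(length):
            bmap = null_bitmap[i // 8]
            bit_value = (bmap >> (i % 8)) & 1
            result.append(bit_value == 0)
        return result

    # 0b00000101 -> elements 0 and 2 are valid, element 1 is NA
    return decode_isna(bytes([0b00000101]), 3)   # [False, True, False]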
@sdc_overload_method(SeriesType, 'notna')
def hpat_pandas_series_notna(self):
"""
Pandas Series method :meth:`pandas.Series.notna` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_notna*
Parameters
-----------
self : :obj:`pandas.Series` object
input series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method notna().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.data.dtype, types.Number):
def hpat_pandas_series_notna_impl(self):
return pandas.Series(numpy.invert(numpy.isnan(self._data)), index=self._index, name=self._name)
return hpat_pandas_series_notna_impl
if isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_notna_impl(self):
result = self.isna()
return pandas.Series(numpy.invert(result._data), index=self._index, name=self._name)
return hpat_pandas_series_notna_impl
@sdc_overload_method(SeriesType, 'ne')
def hpat_pandas_series_ne(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.ne` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method ne().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_ne_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data != other._data)
return hpat_pandas_series_ne_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_ne_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data != other)
return hpat_pandas_series_ne_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'add')
def hpat_pandas_series_add(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.add
Examples
--------
.. literalinclude:: ../../../examples/series/series_add.py
:language: python
:lines: 27-
:caption: Getting the addition of Series and other
:name: ex_series_add
.. command-output:: python ./series/series_add.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.radd <pandas.Series.radd>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.add` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method add().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_add_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data + other._data)
return hpat_pandas_series_add_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_add_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method add(). The object axis\n expected: 0')
return pandas.Series(self._data + other)
return hpat_pandas_series_add_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'sub')
def hpat_pandas_series_sub(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.sub` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method sub().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_sub_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data - other._data)
return hpat_pandas_series_sub_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_sub_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data - other)
return hpat_pandas_series_sub_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@sdc_overload_method(SeriesType, 'sum')
def hpat_pandas_series_sum(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
):
"""
Pandas Series method :meth:`pandas.Series.sum` implementation.
.. only:: developer
Tests:
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_sum*
Parameters
----------
self: :class:`pandas.Series`
input series
axis:
*unsupported*
skipna: :obj:`bool`, default :obj:`True`
Exclude NA/null values when computing the result.
level:
*unsupported*
numeric_only:
*unsupported*
min_count:
*unsupported*
Returns
-------
:obj:`float`
scalar or Series (if level specified)
"""
_func_name = 'Method sum().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(axis, (types.Integer, types.Omitted)) or axis is None):
raise TypingError('{} The axis must be an Integer. Currently unsupported. Given: {}'.format(_func_name, axis))
if not (isinstance(skipna, (types.Boolean, types.Omitted, types.NoneType)) or skipna is None):
raise TypingError('{} The skipna must be a Boolean. Given: {}'.format(_func_name, skipna))
if not (isinstance(level, (types.Integer, types.StringLiteral, types.Omitted, types.NoneType)) or level is None):
raise TypingError(
'{} The level must be an Integer or level name. Currently unsupported. Given: {}'.format(
_func_name, level))
if not (isinstance(numeric_only, (types.Boolean, types.Omitted)) or numeric_only is None):
raise TypingError(
'{} The numeric_only must be a Boolean. Currently unsupported. Given: {}'.format(
_func_name, numeric_only))
if not (isinstance(min_count, (types.Integer, types.Omitted)) or min_count == 0):
raise TypingError(
'{} The min_count must be an Integer. Currently unsupported. Given: {}'.format(
_func_name, min_count))
def hpat_pandas_series_sum_impl(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_sum1
"""
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nansum(self._data)
return numpy.sum(self._data)
return hpat_pandas_series_sum_impl
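# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It shows the two numpy reductions the implementation
# above switches between: nansum when skipna (the default), plain sum otherwise, in
# which case any NaN propagates into the result.
def _example_sum_skipna_sketch():
    import numpy

    data = numpy.array([1.0, 2.0, numpy.nan])
    skipping_nan = numpy.nansum(data)    # 3.0
    propagating_nan = numpy.sum(data)    # nan
    return skipping_nan, propagating_nan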
@sdc_overload_method(SeriesType, 'take')
def hpat_pandas_series_take(self, indices, axis=0, is_copy=False):
"""
Pandas Series method :meth:`pandas.Series.take` implementation.
.. only:: developer
Tests: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_default
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_default_unboxing
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_int
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_int_unboxing
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_str
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_str_unboxing
Parameters
----------
self: :obj:`pandas.Series`
input series
indices: :obj:`array-like`
An array of ints indicating which positions to take
axis: {0 or `index`, 1 or `columns`, None}, default 0
The axis on which to select elements. 0 means that we are selecting rows,
1 means that we are selecting columns.
*unsupported*
is_copy: :obj:`bool`, default False
Whether to return a copy of the original object or not.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object containing the elements taken from the object
"""
ty_checker = TypeChecker('Method take().')
ty_checker.check(self, SeriesType)
if (not isinstance(axis, (int, types.Integer, str, types.UnicodeType, types.StringLiteral, types.Omitted))
and axis not in (0, 'index')):
ty_checker.raise_exc(axis, 'integer or string', 'axis')
if not isinstance(is_copy, (bool, types.Boolean, types.Omitted)) and is_copy is not False:
ty_checker.raise_exc(is_copy, 'boolean', 'is_copy')
if not isinstance(indices, (types.List, types.Array)):
ty_checker.raise_exc(indices, 'array-like', 'indices')
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_take_noindex_impl(self, indices, axis=0, is_copy=False):
local_data = [self._data[i] for i in indices]
return pandas.Series(local_data, indices)
return hpat_pandas_series_take_noindex_impl
def hpat_pandas_series_take_impl(self, indices, axis=0, is_copy=False):
local_data = [self._data[i] for i in indices]
local_index = [self._index[i] for i in indices]
return pandas.Series(local_data, local_index)
return hpat_pandas_series_take_impl
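# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It shows, in plain interpreted pandas, what the indexed
# branch above produces: the data and the index labels are both taken at the given
# positions (the no-index branch uses the positions themselves as the result index).
def _example_take_sketch():
    import pandas

    s = pandas.Series([10, 20, 30], index=['a', 'b', 'c'])
    picked = s.take([2, 0])   # values 30, 10 with index labels 'c', 'a'
    return picked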
@sdc_overload_method(SeriesType, 'idxmax')
def hpat_pandas_series_idxmax(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.idxmax` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_str_idx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_noidx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_idx
Parameters
-----------
axis : :obj:`int`, :obj:`str`, default: None
Axis along which the operation acts
0/None - row-wise operation
1 - column-wise operation
*unsupported*
skipna: :obj:`bool`, default: True
exclude NA/null values
*unsupported*
Returns
-------
:obj:`pandas.Series.index` or nan
returns: Label of the maximum value.
"""
_func_name = 'Method idxmax().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError('{} Numeric values supported only. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, bool)) or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None):
raise TypingError("{} 'axis' unsupported. Given: {}".format(_func_name, axis))
if not (isinstance(skipna, types.Omitted) or skipna is True):
raise TypingError("{} 'skipna' unsupported. Given: {}".format(_func_name, skipna))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_idxmax_impl(self, axis=None, skipna=True):
return numpy.argmax(self._data)
return hpat_pandas_series_idxmax_impl
else:
def hpat_pandas_series_idxmax_index_impl(self, axis=None, skipna=True):
# numpy.nanargmax is not supported by Numba at this time
result = numpy.argmax(self._data)
return self._index[int(result)]
return hpat_pandas_series_idxmax_index_impl
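# Illustrative sketch (not part of the SDC sources; the helper below is an invented
# name and is never called). It shows the two results the branches above return:
# without an explicit index, the positional argmax; with an index, the label stored
# at that position, which is what pandas idxmax reports.
def _example_idxmax_sketch():
    import numpy
    import pandas

    data = numpy.array([1.0, 7.0, 3.0])
    position = numpy.argmax(data)                                  # 1
    label = pandas.Series(data, index=['a', 'b', 'c']).idxmax()    # 'b'
    return position, label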
@sdc_overload_method(SeriesType, 'mul')
def hpat_pandas_series_mul(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.mul
Examples
--------
.. literalinclude:: ../../../examples/series/series_mul.py
:language: python
:lines: 27-
:caption: Element-wise multiplication of two Series
:name: ex_series_mul
.. command-output:: python ./series/series_mul.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rmul <pandas.Series.rmul>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.mul` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method mul().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(level, types.Omitted) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(fill_value, types.Omitted) and fill_value is not None:
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not isinstance(axis, types.Omitted) and axis != 0:
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_mul_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method mul(). The object axis\n expected: 0')
return pandas.Series(self._data * other._data)
return hpat_pandas_series_mul_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_mul_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method mul(). The object axis\n expected: 0')
return pandas.Series(self._data * other)
return hpat_pandas_series_mul_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'div')
def hpat_pandas_series_div(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.div
Examples
--------
.. literalinclude:: ../../../examples/series/series_div.py
:language: python
:lines: 27-
:caption: Element-wise division of one Series by another (binary operator div)
:name: ex_series_div
.. command-output:: python ./series/series_div.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rdiv <pandas.Series.rdiv>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.div` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method div().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(level, types.Omitted) or level is None):
ty_checker.raise_exc(level, 'None', 'level')
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_div_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method div(). The object axis\n expected: 0')
return pandas.Series(self._data / other._data)
return hpat_pandas_series_div_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_div_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method div(). The object axis\n expected: 0')
return pandas.Series(self._data / other)
return hpat_pandas_series_div_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'truediv')
def hpat_pandas_series_truediv(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.truediv
Examples
--------
.. literalinclude:: ../../../examples/series/series_truediv.py
:language: python
:lines: 27-
:caption: Element-wise division of one Series by another (binary operator truediv)
:name: ex_series_truediv
.. command-output:: python ./series/series_truediv.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rtruediv <pandas.Series.rtruediv>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series :meth:`pandas.Series.truediv` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method truediv().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(level, types.Omitted) or level is None):
ty_checker.raise_exc(level, 'None', 'level')
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_truediv_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method truediv(). The object axis\n expected: 0')
return pandas.Series(self._data / other._data)
return hpat_pandas_series_truediv_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_truediv_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method truediv(). The object axis\n expected: 0')
return pandas.Series(self._data / other)
return hpat_pandas_series_truediv_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'floordiv')
def hpat_pandas_series_floordiv(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.floordiv` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method floordiv().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_floordiv_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data // other._data)
return hpat_pandas_series_floordiv_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_floordiv_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data // other)
return hpat_pandas_series_floordiv_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@sdc_overload_method(SeriesType, 'pow')
def hpat_pandas_series_pow(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.pow` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method pow().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_pow_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data ** other._data)
return hpat_pandas_series_pow_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_pow_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data ** other)
return hpat_pandas_series_pow_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'prod')
def hpat_pandas_series_prod(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0):
"""
Pandas Series method :meth:`pandas.Series.prod` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_prod*
Parameters
-----------
self: :obj:`pandas.Series`
input series
axis: {index (0)}
Axis for the function to be applied on.
*unsupported*
skipna: :obj:`bool`, default :obj:`True`
Exclude nan values when computing the result
level: :obj:`int`, :obj:`str`, default :obj:`None`
If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a scalar.
*unsupported*
numeric_only: :obj:`bool`, default :obj:`None`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
min_count: :obj:`int`, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result will be NA.
*unsupported*
Returns
-------
:obj:
Returns scalar or Series (if level specified)
"""
_func_name = 'Method prod().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError('{} Non-numeric values unsupported. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) or skipna is None or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, (types.Omitted, types.NoneType)) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, (types.Omitted, types.NoneType)) or numeric_only is None) \
or not (isinstance(min_count, (types.Omitted, types.Integer)) or min_count == 0):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}, min_count: {}'.format(
_func_name, axis, level, numeric_only, min_count))
def hpat_pandas_series_prod_impl(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanprod(self._data)
else:
return numpy.prod(self._data)
return hpat_pandas_series_prod_impl
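# Hedged sketch (illustrative only): the prod() overload above delegates to numpy.nanprod
# when skipna is True/None (the default) and to numpy.prod otherwise. The commented-out,
# invented example below shows the expected semantics of that mapping.
#
#     import numpy as np
#     data = np.array([1., 2., np.nan, 4.])
#     np.nanprod(data)   # 8.0 -> Series.prod() / Series.prod(skipna=True)
#     np.prod(data)      # nan -> Series.prod(skipna=False)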
@sdc_overload_method(SeriesType, 'quantile')
def hpat_pandas_series_quantile(self, q=0.5, interpolation='linear'):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.quantile
Examples
--------
.. literalinclude:: ../../../examples/series/series_quantile.py
:language: python
:lines: 27-
:caption: Computing quantile for the Series
:name: ex_series_quantile
.. command-output:: python ./series/series_quantile.py
:cwd: ../../../examples
.. note::
Parameter interpolation is currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
`numpy.percentile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.percentile.html#numpy.percentile>`_
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.quantile` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_quantile
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_quantile_q_vector
Parameters
-----------
q : :obj: float or array-like object, default 0.5
the quantile(s) to compute
interpolation: 'linear', 'lower', 'higher', 'midpoint', 'nearest', default `linear`
*unsupported* by Numba
Returns
-------
:obj:`pandas.Series` or float
"""
_func_name = 'Method quantile().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(interpolation, types.Omitted) and interpolation != 'linear':
ty_checker.raise_exc(interpolation, 'str', 'interpolation')
if not isinstance(q, (int, float, list, types.Number, types.Omitted, types.List)):
ty_checker.raise_exc(q, 'int, float, list', 'q')
def hpat_pandas_series_quantile_impl(self, q=0.5, interpolation='linear'):
return numpy.quantile(self._data, q)
return hpat_pandas_series_quantile_impl
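# Hedged sketch (illustrative only): quantile() above simply forwards to numpy.quantile,
# so both a scalar q and a vector of quantiles are handled by NumPy. Commented-out,
# invented example:
#
#     import numpy as np
#     data = np.array([1., 2., 3., 4.])
#     np.quantile(data, 0.5)            # 2.5, matching Series.quantile()
#     np.quantile(data, [0.25, 0.75])   # array([1.75, 3.25]) for a list of quantiles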
@sdc_overload_method(SeriesType, 'rename')
def hpat_pandas_series_rename(self, index=None, copy=True, inplace=False, level=None):
"""
Pandas Series method :meth:`pandas.Series.rename` implementation.
Alter Series index labels or name.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_rename
Parameters
-----------
index : :obj:`scalar` or `hashable sequence` or `dict` or `function`
Dict-like or functions are transformations to apply to the index.
Scalar or hashable sequence-like will alter the Series.name attribute.
Only scalar value is supported.
copy : :obj:`bool`, default :obj:`True`
Whether to copy underlying data.
inplace : :obj:`bool`, default :obj:`False`
Whether to return a new Series. If True then value of copy is ignored.
level : :obj:`int` or `str`
In case of a MultiIndex, only rename labels in the specified level.
*Not supported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` with index labels or name altered.
"""
ty_checker = TypeChecker('Method rename().')
ty_checker.check(self, SeriesType)
if not isinstance(index, (types.Omitted, types.UnicodeType,
types.StringLiteral, str,
types.Integer, types.Boolean,
types.Hashable, types.Float,
types.NPDatetime, types.NPTimedelta,
types.Number)) and index is not None:
ty_checker.raise_exc(index, 'string', 'index')
if not isinstance(copy, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(copy, 'boolean', 'copy')
if not isinstance(inplace, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(inplace, 'boolean', 'inplace')
if not isinstance(level, (types.Omitted, types.UnicodeType,
types.StringLiteral, types.Integer)) and level is not None:
ty_checker.raise_exc(level, 'Integer or string', 'level')
def hpat_pandas_series_rename_idx_impl(self, index=None, copy=True, inplace=False, level=None):
if copy is True:
series_data = self._data.copy()
series_index = self._index.copy()
else:
series_data = self._data
series_index = self._index
return pandas.Series(data=series_data, index=series_index, name=index)
def hpat_pandas_series_rename_noidx_impl(self, index=None, copy=True, inplace=False, level=None):
if copy is True:
series_data = self._data.copy()
else:
series_data = self._data
return pandas.Series(data=series_data, index=self._index, name=index)
if isinstance(self.index, types.NoneType):
return hpat_pandas_series_rename_noidx_impl
return hpat_pandas_series_rename_idx_impl
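# Hedged sketch (illustrative only): as implemented above, rename() supports only a scalar
# 'index' argument, which becomes the new Series name, and copies the data when copy=True.
# Commented-out, invented example of the corresponding pandas behaviour:
#
#     import pandas as pd
#     s = pd.Series([1, 2, 3], name='old')
#     s.rename('new').name   # 'new'; values and index are unchanged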
@sdc_overload_method(SeriesType, 'min')
def hpat_pandas_series_min(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.min` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_min*
Parameters
-----------
axis:
*unsupported*
skipna: :obj:`bool` object
Exclude nan values when computing the result
level:
*unsupported*
numeric_only:
*unsupported*
Returns
-------
:obj:
returns :obj: scalar
"""
_func_name = 'Method min().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not True \
and skipna is not None:
raise TypingError(
'{} The parameter must be a boolean type. Given type skipna: {}'.format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, types.Omitted) or numeric_only is None):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}'.format(_func_name, axis, level,
numeric_only))
def hpat_pandas_series_min_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmin(self._data)
return self._data.min()
return hpat_pandas_series_min_impl
@sdc_overload_method(SeriesType, 'max')
def hpat_pandas_series_max(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.max
Examples
--------
.. literalinclude:: ../../../examples/series/series_max.py
:language: python
:lines: 27-
:caption: Getting the maximum value of Series elements
:name: ex_series_max
.. command-output:: python ./series/series_max.py
:cwd: ../../../examples
.. note::
Parameters axis, level, numeric_only are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.sum <pandas.Series.sum>`
Return the sum.
:ref:`Series.min <pandas.Series.min>`
Return the minimum.
:ref:`Series.max <pandas.Series.max>`
Return the maximum.
:ref:`Series.idxmin <pandas.Series.idxmin>`
Return the index of the minimum.
:ref:`Series.idxmax <pandas.Series.idxmax>`
Return the index of the maximum.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.max` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_max*
Parameters
-----------
axis:
*unsupported*
skipna: :obj:`bool` object
Exclude nan values when computing the result
level:
*unsupported*
numeric_only:
*unsupported*
Returns
-------
:obj:
returns :obj: scalar
"""
_func_name = 'Method max().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) or skipna is True or skipna is None):
ty_checker.raise_exc(skipna, 'bool', 'skipna')
if not isinstance(axis, types.Omitted) and axis is not None:
ty_checker.raise_exc(axis, 'None', 'axis')
if not isinstance(level, (types.Omitted, types.NoneType)) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(numeric_only, types.Omitted) and numeric_only is not None:
ty_checker.raise_exc(numeric_only, 'None', 'numeric_only')
def hpat_pandas_series_max_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmax(self._data)
return self._data.max()
return hpat_pandas_series_max_impl
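# Hedged sketch (illustrative only): max() above uses numpy.nanmax when skipna is True/None
# (the default) and the plain ndarray max() otherwise. Commented-out, invented example:
#
#     import numpy as np
#     data = np.array([1., 5., np.nan])
#     np.nanmax(data)   # 5.0 -> Series.max()
#     data.max()        # nan -> Series.max(skipna=False)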
@sdc_overload_method(SeriesType, 'mean')
def hpat_pandas_series_mean(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.mean` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_mean*
Parameters
-----------
axis: {index (0)}
Axis for the function to be applied on.
*unsupported*
skipna: :obj:`bool`, default True
Exclude NA/null values when computing the result.
level: :obj:`int` or level name, default None
If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a scalar.
*unsupported*
numeric_only: :obj:`bool`, default None
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data. Not implemented for Series.
*unsupported*
Returns
-------
:obj:
Return the mean of the values for the requested axis.
"""
_func_name = 'Method mean().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError(
'{} The parameter must be a boolean type. Given type skipna: {}'.format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, types.Omitted) or numeric_only is None):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}'.format(_func_name, axis, level,
numeric_only))
def hpat_pandas_series_mean_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmean(self._data)
return self._data.mean()
return hpat_pandas_series_mean_impl
@sdc_overload_method(SeriesType, 'mod')
def hpat_pandas_series_mod(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.mod` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method mod().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_mod_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data % other._data)
return hpat_pandas_series_mod_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_mod_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data % other)
return hpat_pandas_series_mod_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'eq')
def hpat_pandas_series_eq(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.eq` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method eq().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_eq_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data == other._data)
return hpat_pandas_series_eq_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_eq_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data == other)
return hpat_pandas_series_eq_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'ge')
def hpat_pandas_series_ge(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.ge` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method ge().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_ge_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data >= other._data)
return hpat_pandas_series_ge_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_ge_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data >= other)
return hpat_pandas_series_ge_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'idxmin')
def hpat_pandas_series_idxmin(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.idxmin` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_str_idx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_no
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_int
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_noidx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_idx
Parameters
-----------
axis : :obj:`int`, :obj:`str`, default: None
Axis along which the operation acts
0/None - row-wise operation
1 - column-wise operation
*unsupported*
skipna: :obj:`bool`, default: True
exclude NA/null values
*unsupported*
Returns
-------
:obj:`pandas.Series.index` or nan
returns: Label of the minimum value.
"""
_func_name = 'Method idxmin().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError('{} Numeric values supported only. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, bool)) or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None):
raise TypingError("{} 'axis' unsupported. Given: {}".format(_func_name, axis))
if not (isinstance(skipna, types.Omitted) or skipna is True):
raise TypingError("{} 'skipna' unsupported. Given: {}".format(_func_name, skipna))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_idxmin_impl(self, axis=None, skipna=True):
return numpy.argmin(self._data)
return hpat_pandas_series_idxmin_impl
else:
def hpat_pandas_series_idxmin_index_impl(self, axis=None, skipna=True):
# no numpy.nanargmin is supported by Numba at this time
result = numpy.argmin(self._data)
return self._index[int(result)]
return hpat_pandas_series_idxmin_index_impl
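# Hedged sketch (illustrative only): idxmin() above returns numpy.argmin of the data for a
# default (positional) index, or the label stored at that position when an index exists.
# Commented-out, invented example of the expected results:
#
#     import pandas as pd
#     pd.Series([3., 1., 2.]).idxmin()                         # 1 (position)
#     pd.Series([3., 1., 2.], index=['a', 'b', 'c']).idxmin()  # 'b' (label)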
@sdc_overload_method(SeriesType, 'lt')
def hpat_pandas_series_lt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.lt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method lt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_lt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data < other._data)
return hpat_pandas_series_lt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_lt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data < other)
return hpat_pandas_series_lt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'gt')
def hpat_pandas_series_gt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.gt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method gt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_gt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data > other._data)
return hpat_pandas_series_gt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_gt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data > other)
return hpat_pandas_series_gt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'le')
def hpat_pandas_series_le(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.le` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method le().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_le_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data <= other._data)
return hpat_pandas_series_le_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_le_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data <= other)
return hpat_pandas_series_le_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'abs')
def hpat_pandas_series_abs(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.abs
Examples
--------
.. literalinclude:: ../../../examples/series/series_abs.py
:language: python
:lines: 27-
:caption: Getting the absolute value of each element in Series
:name: ex_series_abs
.. command-output:: python ./series/series_abs.py
:cwd: ../../../examples
.. seealso::
`numpy.absolute <https://docs.scipy.org/doc/numpy/reference/generated/numpy.absolute.html>`_
Calculate the absolute value element-wise.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.abs` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_abs1
Parameters
-----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` containing the absolute value of elements
"""
_func_name = 'Method abs().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(self.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} The function only applies to elements that are all numeric. Given data type: {}'.format(_func_name,
self.dtype))
def hpat_pandas_series_abs_impl(self):
return pandas.Series(numpy.abs(self._data))
return hpat_pandas_series_abs_impl
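# Hedged sketch (illustrative only): abs() above wraps numpy.abs over the underlying data.
# Commented-out, invented example:
#
#     import pandas as pd
#     pd.Series([-1.5, 0.0, 2.5]).abs()   # -> [1.5, 0.0, 2.5]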
@sdc_overload_method(SeriesType, 'unique')
def hpat_pandas_series_unique(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.unique
Examples
--------
.. literalinclude:: ../../../examples/series/series_unique.py
:language: python
:lines: 27-
:caption: Getting unique values in Series
:name: ex_series_unique
.. command-output:: python ./series/series_unique.py
:cwd: ../../../examples
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.unique` implementation.
Note: The order of the returned values is unspecified
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique_sorted
Parameters
-----------
self: :class:`pandas.Series`
input arg
Returns
-------
:obj:`numpy.array`
returns :obj:`numpy.array` ndarray
"""
ty_checker = TypeChecker('Method unique().')
ty_checker.check(self, SeriesType)
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_unique_str_impl(self):
'''
Returns sorted unique elements of an array
Note: Can't use NumPy because StringArrayType has no ravel() in nopython mode.
Also, NotImplementedError: unicode_type cannot be represented as a Numpy dtype
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique_str
'''
str_set = set(self._data)
return to_array(str_set)
return hpat_pandas_series_unique_str_impl
def hpat_pandas_series_unique_impl(self):
'''
Returns sorted unique elements of an array
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique
'''
return numpy.unique(self._data)
return hpat_pandas_series_unique_impl
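# Hedged sketch (illustrative only): for numeric data unique() above returns numpy.unique
# (which is sorted), while for string data it collects a set, so the order is unspecified.
# Commented-out, invented example for the numeric branch:
#
#     import numpy as np
#     np.unique(np.array([3, 1, 3, 2]))   # array([1, 2, 3])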
@sdc_overload_method(SeriesType, 'cumsum')
def hpat_pandas_series_cumsum(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.cumsum` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_full
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_unsupported_axis
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
*args:
*unsupported*
Returns
-------
:obj:`scalar`, :obj:`pandas.Series`
returns :obj:`scalar` or :obj:`pandas.Series` object
"""
_func_name = 'Method cumsum().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(axis, (types.Omitted, types.NoneType)) and axis is not None:
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
def hpat_pandas_series_cumsum_impl(self, axis=None, skipna=True):
if skipna:
# numpy.nancumsum replaces NaNs with 0, Series.cumsum does not, so restore NaNs in the result
local_data = numpy.nancumsum(self._data)
local_data[numpy.isnan(self._data)] = numpy.nan
return pandas.Series(local_data)
return pandas.Series(self._data.cumsum())
return hpat_pandas_series_cumsum_impl
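# Hedged sketch (illustrative only): with skipna=True the implementation above computes
# numpy.nancumsum and then restores NaN at the original NaN positions, mimicking pandas.
# Commented-out, invented example:
#
#     import numpy as np
#     data = np.array([1., np.nan, 2.])
#     out = np.nancumsum(data)        # [1., 1., 3.]
#     out[np.isnan(data)] = np.nan    # [1., nan, 3.], matching Series.cumsum()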
@sdc_overload_method(SeriesType, 'nunique')
def hpat_pandas_series_nunique(self, dropna=True):
"""
Pandas Series method :meth:`pandas.Series.nunique` implementation.
Note: Mixed numeric and string data is unsupported
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_nunique
Parameters
-----------
self: :obj:`pandas.Series`
input series
dropna: :obj:`bool`, default True
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method nunique().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_nunique_str_impl(self, dropna=True):
"""
It is better to merge with Numeric branch
"""
data = self._data
if dropna:
nan_mask = self.isna()
data = self._data[~nan_mask._data]
unique_values = set(data)
return len(unique_values)
return hpat_pandas_series_nunique_str_impl
def hpat_pandas_series_nunique_impl(self, dropna=True):
"""
This function is for numeric data because NumPy doesn't support StringArrayType
The algorithm looks a bit convoluted because, currently, set() cannot be used with NumPy arrays under Numba JIT
"""
data_mask_for_nan = numpy.isnan(self._data)
nan_exists = numpy.any(data_mask_for_nan)
data_no_nan = self._data[~data_mask_for_nan]
data_set = set(data_no_nan)
if dropna or not nan_exists:
return len(data_set)
else:
return len(data_set) + 1
return hpat_pandas_series_nunique_impl
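# Hedged sketch (illustrative only): nunique() above counts the set of non-NaN values and,
# when dropna=False and NaNs are present, adds one for NaN. Commented-out, invented example:
#
#     import numpy as np
#     import pandas as pd
#     s = pd.Series([1., 1., np.nan])
#     s.nunique()               # 1
#     s.nunique(dropna=False)   # 2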
@sdc_overload_method(SeriesType, 'count')
def hpat_pandas_series_count(self, level=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.count
Examples
--------
.. literalinclude:: ../../../examples/series/series_count.py
:language: python
:lines: 27-
:caption: Counting non-NaN values in Series
:name: ex_series_count
.. command-output:: python ./series/series_count.py
:cwd: ../../../examples
.. note::
Parameter level is currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.value_counts <pandas.Series.value_counts>`
:ref:`Series.str.len <pandas.Series.str.len>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.count` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_count
Parameters
-----------
self: :obj:`pandas.Series`
input series
level: :obj:`int` or name
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method count().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(level, (types.Omitted, types.NoneType)) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_count_str_impl(self, level=None):
nan_mask = self.isna()
return numpy.sum(nan_mask._data == 0)
return hpat_pandas_series_count_str_impl
def hpat_pandas_series_count_impl(self, level=None):
"""
Return number of non-NA/null observations in the object
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_count
"""
data_no_nan = self._data[~numpy.isnan(self._data)]
return len(data_no_nan)
return hpat_pandas_series_count_impl
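# Hedged sketch (illustrative only): count() above returns the number of non-NaN elements
# (using the isna() mask for string data). Commented-out, invented example:
#
#     import numpy as np
#     import pandas as pd
#     pd.Series([1., np.nan, 3.]).count()   # 2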
@sdc_overload_method(SeriesType, 'median')
def hpat_pandas_series_median(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.median` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_median1*
Parameters
-----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int` or :obj:`string` {0 or `index`, None}, default None
The axis for the function to be applied on.
*unsupported*
skipna: :obj:`bool`, default True
exclude NA/null values when computing the result
level: :obj:`int` or :obj:`string`, default None
*unsupported*
numeric_only: :obj:`bool` or None, default None
*unsupported*
Returns
-------
:obj:`float` or :obj:`pandas.Series` (if level is specified)
median of values in the series
"""
_func_name = 'Method median().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(self.dtype, types.Number):
raise TypingError(
'{} The function only applies to elements that are all numeric. Given data type: {}'.format(
_func_name, self.dtype))
if not (isinstance(axis, (types.Integer, types.UnicodeType, types.Omitted)) or axis is None):
raise TypingError(
'{} The axis must be an Integer or a String. Currently unsupported. Given: {}'.format(
_func_name, axis))
if not (isinstance(skipna, (types.Boolean, types.Omitted, types.NoneType)) or skipna or skipna is None):
raise TypingError('{} The skipna parameter must be a boolean. Given: {}'.format(_func_name, skipna))
if not ((level is None or isinstance(level, (types.Omitted, types.NoneType)))
and (numeric_only is None or isinstance(numeric_only, types.Omitted))
and (axis is None or isinstance(axis, types.Omitted))
):
raise TypingError(
'{} Unsupported parameters. Given level: {}, numeric_only: {}, axis: {}'.format(
_func_name, level, numeric_only, axis))
def hpat_pandas_series_median_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmedian(self._data)
return numpy.median(self._data)
return hpat_pandas_series_median_impl
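# Hedged sketch (illustrative only): median() above maps to numpy.nanmedian by default and
# to numpy.median when skipna=False. Commented-out, invented example:
#
#     import numpy as np
#     data = np.array([1., 2., np.nan, 4.])
#     np.nanmedian(data)   # 2.0 -> Series.median()
#     np.median(data)      # nan -> Series.median(skipna=False)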
@sdc_overload_method(SeriesType, 'argsort')
def hpat_pandas_series_argsort(self, axis=0, kind='quicksort', order=None):
"""
Pandas Series method :meth:`pandas.Series.argsort` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_argsort*
Parameters
-----------
self: :class:`pandas.Series`
input series
axis: :obj:`int`
Has no effect but is accepted for compatibility with numpy.
*unsupported*
kind: :obj:'str', {'mergesort', 'quicksort', 'heapsort'}, default: 'quicksort'
Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm
*uses python func - sorted() for str and numpy func - sort() for num*
*'heapsort' unsupported*
order: :obj:`str` or :obj:`list of str`, default: None
Has no effect but is accepted for compatibility with numpy.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns: Positions of values within the sort order with -1 indicating nan values.
"""
_func_name = 'Method argsort().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError('{} Non-numeric type unsupported. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(axis, types.Omitted) or isinstance(axis, types.Integer) or axis == 0):
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
if not isinstance(kind, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} Non-string type unsupported. Given kind: {}'.format(_func_name, kind))
if not isinstance(order, (str, types.UnicodeType, types.StringLiteral, types.Omitted, types.NoneType, types.List))\
and order is not None:
raise TypingError('{} Unsupported parameters. Given order: {}'.format(_func_name, order))
if not isinstance(self.index, types.NoneType):
def hpat_pandas_series_argsort_idx_impl(self, axis=0, kind='quicksort', order=None):
if kind != 'quicksort' and kind != 'mergesort':
raise ValueError("Method argsort(). Unsupported parameter. Given 'kind' != 'quicksort' or 'mergesort'")
if kind == 'mergesort':
# It is impossible to use numpy.argsort(self._data, kind=kind) since numba gives typing error
sort = numpy.argsort(self._data, kind='mergesort')
else:
sort = numpy.argsort(self._data)
na = self.isna().sum()
result = numpy.empty(len(self._data), dtype=numpy.int64)
na_data_arr = sdc.hiframes.api.get_nan_mask(self._data)
if kind == 'mergesort':
sort_nona = numpy.argsort(self._data[~na_data_arr], kind='mergesort')
else:
sort_nona = numpy.argsort(self._data[~na_data_arr])
q = 0
for id, i in enumerate(sort):
if id in set(sort[len(self._data) - na:]):
q += 1
else:
result[id] = sort_nona[id - q]
for i in sort[len(self._data) - na:]:
result[i] = -1
return pandas.Series(result, self._index)
return hpat_pandas_series_argsort_idx_impl
def hpat_pandas_series_argsort_noidx_impl(self, axis=0, kind='quicksort', order=None):
if kind != 'quicksort' and kind != 'mergesort':
raise ValueError("Method argsort(). Unsupported parameter. Given 'kind' != 'quicksort' or 'mergesort'")
if kind == 'mergesort':
sort = numpy.argsort(self._data, kind='mergesort')
else:
sort = numpy.argsort(self._data)
na = self.isna().sum()
result = numpy.empty(len(self._data), dtype=numpy.int64)
na_data_arr = sdc.hiframes.api.get_nan_mask(self._data)
if kind == 'mergesort':
sort_nona = numpy.argsort(self._data[~na_data_arr], kind='mergesort')
else:
sort_nona = numpy.argsort(self._data[~na_data_arr])
q = 0
for id, i in enumerate(sort):
if id in set(sort[len(self._data) - na:]):
q += 1
else:
result[id] = sort_nona[id - q]
for i in sort[len(self._data) - na:]:
result[i] = -1
return pandas.Series(result)
return hpat_pandas_series_argsort_noidx_impl
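# Hedged sketch (illustrative only): argsort() above places the sort positions of the
# non-NaN values and marks NaN positions with -1, following pandas. Commented-out example
# with invented data:
#
#     import numpy as np
#     import pandas as pd
#     pd.Series([3., np.nan, 1.]).argsort()   # values [1, -1, 0]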
@sdc_overload_method(SeriesType, 'sort_values')
def hpat_pandas_series_sort_values(self, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'):
"""
Pandas Series method :meth:`pandas.Series.sort_values` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_sort_values*
Parameters
-----------
self: :class:'pandas.Series'
input series
axis: 0 or :obj:'pandas.Series.index', default: 0
Axis to direct sorting.
*unsupported*
ascending: :obj:'bool', default: True
If True, sort values in ascending order, otherwise descending.
kind: :obj:'str', {'mergesort', 'quicksort', 'heapsort'}, default: 'quicksort'
Choice of sorting algorithm.
*uses python func - sorted() for str and numpy func - sort() for num*
*'heapsort' unsupported*
na_position: {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.
*unsupported*
Returns
-------
:obj:`pandas.Series`
"""
_func_name = 'Method sort_values().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(axis, types.Omitted) or isinstance(axis, types.Integer) or axis == 0):
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
if not (isinstance(ascending, types.Omitted) or isinstance(ascending, types.Boolean) or ascending is True or ascending is False):
raise TypingError('{} Unsupported parameters. Given ascending: {}'.format(_func_name, ascending))
if not isinstance(kind, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} Non-string type unsupported. Given kind: {}'.format(_func_name, kind))
if not isinstance(na_position, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} Unsupported parameters. Given na_position: {}'.format(_func_name, na_position))
if isinstance(self.index, types.NoneType) and isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_sort_values_str_noidx_impl(self, axis=0, ascending=True, inplace=False, kind='quicksort',
na_position='last'):
if kind != 'quicksort' and kind != 'mergesort':
raise ValueError("Method sort_values(). Unsupported parameter. Given kind != 'quicksort', 'mergesort'")
index = numpy.arange(len(self._data))
my_index = numpy.arange(len(self._data))
used_index = numpy.full((len(self._data)), -1)
result = sorted(self._data)
cycle = range(len(self._data))
if not ascending:
result = result[::-1]
cycle = range(len(self._data) - 1, -1, -1)
result_index = index.copy()
for i in range(len(result_index)):
find = 0
for search in cycle:
check = 0
for j in used_index:
if my_index[search] == j:
check = 1
if (self._data[search] == result[i]) and check == 0 and find == 0:
result_index[i] = index[search]
used_index[i] = my_index[search]
find = 1
na = self.isna().sum()
num = 0
for i in self.isna():
j = len(result_index) - na
if i and used_index[j] == -1:
result_index[j] = index[num]
used_index[j] = my_index[num]
na -= 1
num += 1
return pandas.Series(result, result_index)
return hpat_pandas_series_sort_values_str_noidx_impl
if isinstance(self.index, types.NoneType) and isinstance(self.data.dtype, types.Number):
def hpat_pandas_series_sort_values_num_noidx_impl(self, axis=0, ascending=True, inplace=False, kind='quicksort',
na_position='last'):
if kind != 'quicksort' and kind != 'mergesort':
raise ValueError("Method sort_values(). Unsupported parameter. Given kind != 'quicksort', 'mergesort'")
na = self.isna().sum()
indices = numpy.arange(len(self._data))
if kind == 'mergesort':
# It is impossible to use numpy.argsort(self._data, kind=kind) since numba gives typing error
index_result = numpy.argsort(self._data, kind='mergesort')
else:
index_result = numpy.argsort(self._data)
result = numpy.sort(self._data)
i = len(self._data) - na
index_result[i:] = index_result[i:][::-1]
if not ascending:
index_result[:i] = index_result[:i][::-1]
result[:i] = result[:i][::-1]
for i in range(len(index_result)):
indices[i] = index_result[i]
return pandas.Series(result, indices)
return hpat_pandas_series_sort_values_num_noidx_impl
if isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_sort_values_str_idx_impl(self, axis=0, ascending=True, inplace=False, kind='quicksort',
na_position='last'):
if kind != 'quicksort' and kind != 'mergesort':
raise ValueError("Method sort_values(). Unsupported parameter. Given kind != 'quicksort', 'mergesort'")
index = self._index
my_index = numpy.arange(len(self._data))
used_index = numpy.full((len(self._data)), -1)
result = sorted(self._data)
cycle = range(len(self._data))
if not ascending:
result = result[::-1]
cycle = range(len(self._data) - 1, -1, -1)
result_index = self._index.copy()
for i in range(len(result_index)):
find = 0
for search in cycle:
check = 0
for j in used_index:
if my_index[search] == j:
check = 1
if (self._data[search] == result[i]) and check == 0 and find == 0:
result_index[i] = index[search]
used_index[i] = my_index[search]
find = 1
na = self.isna().sum()
num = 0
for i in self.isna():
j = len(result_index) - na
if i and used_index[j] == -1:
result_index[j] = index[num]
used_index[j] = my_index[num]
na -= 1
num += 1
return pandas.Series(result, result_index)
return hpat_pandas_series_sort_values_str_idx_impl
if isinstance(self.data.dtype, types.Number):
def hpat_pandas_series_sort_values_num_idx_impl(self, axis=0, ascending=True, inplace=False, kind='quicksort',
na_position='last'):
if kind != 'quicksort' and kind != 'mergesort':
raise ValueError("Method sort_values(). Unsupported parameter. Given kind != 'quicksort', 'mergesort'")
na = self.isna().sum()
indices = self._index.copy()
if kind == 'mergesort':
index_result = numpy.argsort(self._data, kind='mergesort')
else:
index_result = numpy.argsort(self._data)
result = numpy.sort(self._data)
i = len(self._data) - na
index_result[i:] = index_result[i:][::-1]
if not ascending:
index_result[:i] = index_result[:i][::-1]
result[:i] = result[:i][::-1]
for i in range(len(index_result)):
indices[i] = self._index[index_result[i]]
return pandas.Series(result, indices)
return hpat_pandas_series_sort_values_num_idx_impl
@sdc_overload_method(SeriesType, 'dropna')
def hpat_pandas_series_dropna(self, axis=0, inplace=False):
"""
Pandas Series method :meth:`pandas.Series.dropna` implementation.
.. only:: developer
Tests: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_dropna*
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int` or :obj:`string` {0 or `index`}, default 0
There is only one axis to drop values from.
inplace: :obj:`bool`, default False
If True, do operation inplace and return None.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object with NA entries dropped from it.
"""
_func_name = 'Method dropna().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(axis, (types.Integer, types.StringLiteral, types.UnicodeType, types.Omitted)) or axis == 0):
raise TypingError('{} The axis must be an Integer or String. Given: {}'.format(_func_name, axis))
if not (inplace is False or isinstance(inplace, types.Omitted)):
raise TypingError('{} Unsupported parameters. Given inplace: {}'.format(_func_name, inplace))
def hpat_pandas_series_dropna_impl(self, axis=0, inplace=False):
# generate Series index if needed by using SeriesType.index (i.e. not self._index)
na_data_arr = sdc.hiframes.api.get_nan_mask(self._data)
data = self._data[~na_data_arr]
index = self.index[~na_data_arr]
return pandas.Series(data, index, self._name)
return hpat_pandas_series_dropna_impl
@sdc_overload_method(SeriesType, 'fillna')
def hpat_pandas_series_fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None):
"""
Pandas Series method :meth:`pandas.Series.fillna` implementation.
.. only:: developer
Tests: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_fillna*
Parameters
----------
self: :obj:`pandas.Series`
input series
value: scalar of the same dtype as input Series (other types currently unsupported), default None
Value to use to fill the NA elements
method: :obj:`string` {`backfill`, `bfill`, `pad`, `ffill`, None}, default None
Method to use for filling holes in reindexed Series.
*unsupported*
axis: :obj:`int` or :obj:`string` {0 or `index`}, default None
There is only one axis to drop values from.
inplace: :obj:`bool`, default False
If True, do operation inplace and return None.
Supported as literal value only
limit: :obj:`int`, default None
If method is specified, this is the maximum number of consecutive NaN
values to forward/backward fill.
*unsupported*
downcast: :obj:`dict` or :obj:`string` {`infer`}, default None
Controls logic of downcasting elements to particular dtype
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` with missed values filled.
"""
_func_name = 'Method fillna().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(axis, (types.Integer, types.StringLiteral, types.UnicodeType, types.Omitted)) or axis is None):
raise TypingError('{} The axis must be an Integer or String. Given: {}'.format(_func_name, axis))
if not (isinstance(inplace, types.Literal) and isinstance(inplace, types.Boolean)
or isinstance(inplace, types.Omitted)
or inplace is False):
raise TypingError('{} The inplace must be a literal Boolean constant. Given: {}'.format(_func_name, inplace))
if not (
(method is None or isinstance(method, types.Omitted))
and (limit is None or isinstance(limit, types.Omitted))
and (downcast is None or isinstance(downcast, types.Omitted))
):
raise TypingError('{} Unsupported parameters. Given method: {}, limit: {}, downcast: {}'.format(
_func_name, method, limit, downcast))
# inplace value has to be known at compile time to select between implementations with different signatures
if ((isinstance(inplace, types.Literal) and inplace.literal_value == True)
or (isinstance(inplace, bool) and inplace == True)):
# do operation inplace, fill the NA/NaNs in the same array and return None
if isinstance(self.dtype, types.UnicodeType):
# TODO: StringArrayType cannot resize inplace, and assigning a copy back to self._data is not possible now
raise TypingError('{} Not implemented when Series dtype is {} and\
inplace={}'.format(_func_name, self.dtype, inplace))
elif isinstance(self.dtype, (types.Integer, types.Boolean)):
def hpat_pandas_series_no_nan_fillna_impl(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None):
# no NaNs in series of Integers or Booleans
return None
return hpat_pandas_series_no_nan_fillna_impl
else:
def hpat_pandas_series_fillna_impl(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None):
na_data_arr = sdc.hiframes.api.get_nan_mask(self._data)
self._data[na_data_arr] = value
return None
return hpat_pandas_series_fillna_impl
else:
# non inplace implementations, copy array, fill the NA/NaN and return a new Series
if isinstance(self.dtype, types.UnicodeType):
# For StringArrayType implementation is taken from _series_fillna_str_alloc_impl
# (can be called directly when it's index handling is fixed)
def hpat_pandas_series_str_fillna_impl(self, value=None, method=None, axis=None,
inplace=False, limit=None, downcast=None):
n = len(self._data)
num_chars = 0
# get total chars in new array
for i in numba.parfor.internal_prange(n):
s = self._data[i]
if sdc.hiframes.api.isna(self._data, i):
num_chars += len(value)
else:
num_chars += len(s)
filled_data = sdc.str_arr_ext.pre_alloc_string_array(n, num_chars)
for i in numba.parfor.internal_prange(n):
if sdc.hiframes.api.isna(self._data, i):
filled_data[i] = value
else:
filled_data[i] = self._data[i]
return pandas.Series(filled_data, self._index, self._name)
return hpat_pandas_series_str_fillna_impl
elif isinstance(self.dtype, (types.Integer, types.Boolean)):
def hpat_pandas_series_no_nan_fillna_impl(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None):
return pandas.Series(numpy.copy(self._data), self._index, self._name)
return hpat_pandas_series_no_nan_fillna_impl
else:
def hpat_pandas_series_fillna_impl(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None):
na_data_arr = sdc.hiframes.api.get_nan_mask(self._data)
filled_data = numpy.copy(self._data)
filled_data[na_data_arr] = value
return pandas.Series(filled_data, self._index, self._name)
return hpat_pandas_series_fillna_impl
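# --- Illustrative usage sketch (not part of the original SDC sources) ---
# The overloads in this module are registered via sdc_overload_method, so a
# Numba-compiled function that builds a pandas.Series can call fillna/dropna/
# sort_values and be dispatched to the implementations returned above. The
# exact jit entry point may differ between SDC releases; a hypothetical example:
#
#   import numpy
#   import pandas
#   import sdc  # noqa: F401  -- importing SDC registers the overloads
#   from numba import njit
#
#   @njit
#   def fill_missing(data):
#       s = pandas.Series(data)
#       return s.fillna(0.0)
#
#   fill_missing(numpy.array([1.0, numpy.nan, 3.0]))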
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestDataFrameUpdate:
def test_update_nan(self):
# #15593 #15617
# test 1
df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = df1.copy()
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
# test 2
df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
def test_update(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
df.update(other)
expected = DataFrame(
[[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
tm.assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"bad_kwarg, exception, msg",
[
# errors must be 'ignore' or 'raise'
({"errors": "something"}, ValueError, "The parameter errors must.*"),
({"join": "inner"}, NotImplementedError, "Only left join is supported"),
],
)
def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
df = DataFrame([[1.5, 1, 3.0]])
with pytest.raises(exception, match=msg):
df.update(df, **bad_kwarg)
def test_update_raise_on_overlap(self):
df = DataFrame(
[[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
with pytest.raises(ValueError, match="Data overlaps"):
df.update(other, errors="raise")
def test_update_from_non_df(self):
d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
df = DataFrame(d)
d["a"] = | Series([5, 6, 7, 8]) | pandas.Series |
from typing import List
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
def cka_wide(X, Y):
"""
Calculate CKA for two matrices. This algorithm uses a Gram matrix
implementation, which is fast when the data is wider than it is
tall.
This implementation is inspired by the one in this colab:
https://colab.research.google.com/github/google-research/google-research/blob/master/representation_similarity/Demo.ipynb#scrollTo=MkucRi3yn7UJ
Note that we center the features rather than the Gram matrix
because we think the latter is tricky and mysterious. It only works for
linear CKA though (we only implement linear CKA throughout).
"""
X = X - X.mean(0, keepdims=True)
Y = Y - Y.mean(0, keepdims=True)
XXT = X @ X.T
YYT = Y @ Y.T
# We use reshape((-1,)) instead of ravel() to ensure this is compatible
# with numpy and pytorch tensors.
top = (XXT.reshape((-1,)) * YYT.reshape((-1,))).sum()
bottom = np.sqrt((XXT ** 2).sum() * (YYT ** 2).sum())
c = top / bottom
return c
def cka_tall(X, Y):
"""
Calculate CKA for two matrices.
"""
X = X - X.mean(0, keepdims=True)
Y = Y - Y.mean(0, keepdims=True)
XTX = X.T @ X
YTY = Y.T @ Y
YTX = Y.T @ X
# Equation (4)
top = (YTX ** 2).sum()
bottom = np.sqrt((XTX ** 2).sum() * (YTY ** 2).sum())
c = top / bottom
return c
def cka(X, Y):
"""
Calculate CKA for two matrices.
CKA has several potential implementations. The naive implementation is
appropriate for tall matrices (more examples than features), but this
implementation uses lots of memory and is slow when there are many more
features than examples. In that case, which often happens with DNNs, we
prefer the Gram matrix variant.
"""
if X.shape[0] < X.shape[1]:
return cka_wide(X, Y)
else:
return cka_tall(X, Y)
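# Quick self-check (illustrative addition, not part of the original module): the
# wide and tall implementations should agree to numerical precision on random data.
def _check_cka_implementations_agree(seed=0):
    rng = np.random.default_rng(seed)
    X = rng.normal(size=(50, 10))  # 50 examples, 10 features -> "tall"
    Y = rng.normal(size=(50, 7))
    assert abs(cka_wide(X, Y) - cka_tall(X, Y)) < 1e-10
    return cka(X, Y)  # dispatches to cka_tall here, since rows >= columns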
def multi_cka(reps: List[np.array]) -> np.array:
"""
Calculate CKA matrix for a list of matrices.
Kornblith et al. (2019) https://arxiv.org/abs/1905.00414
Args:
reps: a list of representations of the same data from different
networks. All have the same height (number of examplars) but
potentially different numbers of columns.
Returns:
the CKA matrix (larger values mean more similar).
"""
C = np.zeros((len(reps), len(reps)))
for i in range(len(reps)):
C[i, i] = 1.0 # by definition
for j in range(i+1, len(reps)):
c = cka(reps[i], reps[j])
C[i, j] = c
C[j, i] = c
return C
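# Illustrative usage (shapes are assumptions): build a similarity matrix for three
# representations of the same 100 stimuli with different feature dimensions.
def _example_multi_cka(seed=0):
    rng = np.random.default_rng(seed)
    reps = [rng.normal(size=(100, d)) for d in (16, 32, 64)]
    return multi_cka(reps)  # 3x3 symmetric matrix with ones on the diagonal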
def main():
with open ('../data/matrices.pkl', 'rb') as f:
data = pickle.load(f)
C = multi_cka(data['reps'])
df = pd.DataFrame(C)
import os
from typing import Tuple
import numpy as np
import pandas as pd
from mydeep_api._deprecated.file_dataset import FileDataset
from sign_mnist.prepare_sign_mnist import name_provider
from stream_lib.stream import stream
from surili_core.surili_io.image_io import OpencvIO
from surili_core.worker import Worker
from surili_core.workspace import Workspace
def load_dataframe(csv_path: str, target_shape: Tuple[int, int]):
_df = pd.read_csv(csv_path)
# basics
from typing import Callable
import pandas as pd
import os
from pandas.core.frame import DataFrame
# segnlp
from segnlp import utils
from segnlp import metrics
from segnlp.utils.baselines import MajorityBaseline
from segnlp.utils.baselines import RandomBaseline
from segnlp.utils.baselines import SentenceMajorityBaseline
from segnlp.utils.baselines import SentenceRandomBaseline
from segnlp.utils.baselines import SentenceBIOBaseline
class Baseline:
def __run_baseline(
self,
baseline,
name : str,
df:pd.DataFrame,
kwargs:dict,
metric_f: Callable,
task_labels: dict,
):
all_metrics = []
for rs in utils.random_ints(self.n_random_seeds):
kwargs["random_seed"] = rs
#init the baseline model
bl = baseline(**kwargs)
# run baseline
pred_df = bl(df.copy(deep=True))
#evaluate baseline
metrics = metric_f(
pred_df = pred_df,
target_df = df,
task_labels = task_labels
)
metrics["random_seed"] = rs
metrics["baseline"] = name
all_metrics.append(metrics)
score_df = pd.DataFrame(all_metrics)
'''
Tests for Naive benchmark classes
Tests currently cover:
1. Forecast horizons
2. Allowable input types: np.ndarray, pd.DataFrame, pd.Series
3. Failure paths for abnormal input such as np.nan, non numeric,
empty arrays and np.Inf
4. Predictions
- naive1 - carries forward last value
- snaive - carries forward previous h values
- average - flat forecast of average
- drift - previous value + gradient
- ensemble naive - the average of all of the methods
- Test fit_predict()
5. Prediction intervals
- horizon
- sets i.e. 2 sets of intervals (0.8 and 0.95)
- width
- bootstrapped prediction intervals
- length of horizon
- number of sets of intervals returned.
6. Fitted values
- expected length
- count of NaN
'''
import pytest
import pandas as pd
import numpy as np
import forecast_tools.baseline as b
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_naive1_forecast_horizon(data, horizon, expected):
'''
test that predict() returns a forecast of the expected length (the forecast horizon).
'''
model = b.Naive1()
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_naive1_fit_predict(data, horizon, expected):
'''
test that fit_predict() returns a forecast of the expected length (the forecast horizon).
'''
model = b.Naive1()
# fit_predict for point forecasts only
preds = model.fit_predict(pd.Series(data), horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_snaive_forecast_horizon(data, horizon, expected):
'''
test that predict() returns a forecast of the expected length (the forecast horizon).
'''
model = b.SNaive(1)
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_snaive_fit_predict(data, horizon, expected):
'''
test that fit_predict() returns a forecast of the expected length (the forecast horizon).
'''
model = b.SNaive(1)
# fit_predict for point forecasts only
preds = model.fit_predict(pd.Series(data), horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_drift_forecast_horizon(data, horizon, expected):
'''
test that predict() returns a forecast of the expected length (the forecast horizon).
'''
model = b.Drift()
model.fit(np.array(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_drift_fit_predict(data, horizon, expected):
'''
test that fit_predict() returns a forecast of the expected length (the forecast horizon).
'''
model = b.Drift()
# fit_predict for point forecasts only
preds = model.fit_predict(pd.Series(data), horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_average_forecast_horizon(data, horizon, expected):
'''
test that predict() returns a forecast of the expected length (the forecast horizon).
'''
model = b.Average()
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_average_fit_predict(data, horizon, expected):
'''
test that fit_predict() returns a forecast of the expected length (the forecast horizon).
'''
model = b.Average()
# fit_predict for point forecasts only
preds = model.fit_predict(pd.Series(data), horizon)
assert len(preds) == expected
import argparse
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
def categorize_by_label_distribution(group: pd.DataFrame,
label: str,
dif_threshold: float = 0.1,
top_percent: float = 0.2) -> str:
"""Classify category that based on distribution of the label
Args:
group (pd.DataFrame): DataFrame of each target.
label (str): The name of the label. e.g. identity(%), coverage(%).
dif_threshold (float, optional): Threshold of top group. Defaults to 0.1.
top_percent (float, optional): Threshold of percent of top group. Defaults to 0.2.
Returns:
str : The name of the category.
"""
identity_sorted = list(group[label].sort_values())
max_identity = identity_sorted[-1]
second_identity = identity_sorted[-2]
n_ten_percent = int(top_percent * len(group))
ten_percent_identity = identity_sorted[-n_ten_percent]
dif = max_identity - second_identity
NAME_CATEGORY = ['top', 'multi top', 'the others']
if dif > dif_threshold:
return NAME_CATEGORY[0]
elif max_identity - ten_percent_identity > dif_threshold:
return NAME_CATEGORY[1]
else:
return NAME_CATEGORY[2]
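# Illustrative example (hypothetical identity values): a target whose best template
# is well ahead of the rest falls into the 'top' category.
def _example_distribution_category():
    group = pd.DataFrame({'identity(%)': [0.30, 0.35, 0.40, 0.45, 0.80]})
    return categorize_by_label_distribution(group, label='identity(%)')  # -> 'top'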
def categorize_by_max_quality_template(group: pd.DataFrame,
label: str = 'identity(%)',
threshold_list: List = [0.4, 0.6, 0.8]) -> str:
"""Classify category that based on maximum value of the label
Args:
group (pd.DataFrame): DataFrame of each target.
label (str, optional): The name of the label. Defaults to 'identity(%)'.
threshold_list (List, optional): Thresholds of the label for each category. Defaults to [0.4, 0.6, 0.8].
Returns:
str: The name of the category.
"""
max_quality = group[label].max()
if len(threshold_list) == 2:
NAME_CATEGORY = ['Low', 'Middle', 'High']
if max_quality < threshold_list[0]:
return NAME_CATEGORY[0]
elif max_quality < threshold_list[1]:
return NAME_CATEGORY[1]
else:
return NAME_CATEGORY[2]
elif len(threshold_list) == 3:
NAME_CATEGORY = ['Low', 'Mid-low', 'Mid-high', 'High']
if max_quality < threshold_list[0]:
return NAME_CATEGORY[0]
elif max_quality < threshold_list[1]:
return NAME_CATEGORY[1]
elif max_quality < threshold_list[2]:
return NAME_CATEGORY[2]
else:
return NAME_CATEGORY[3]
else:
raise ValueError('threshold_list should be 2 or 3')
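# Illustrative example (hypothetical identity values): with the default thresholds
# [0.4, 0.6, 0.8], a best identity of 0.65 falls into the 'Mid-high' bucket.
def _example_quality_category():
    group = pd.DataFrame({'identity(%)': [0.20, 0.50, 0.65]})
    return categorize_by_max_quality_template(group, label='identity(%)')  # -> 'Mid-high'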
def categorize_by_label(template_df: pd.DataFrame,
label: str,
max_value_threshold_list: List,
top_group_dif_threshold: float = 0.1,
top_group_percent: float = 0.2) -> pd.DataFrame:
"""Categorize target by the specified label.
Args:
template_df (pd.DataFrame): The DataFrame of the template.
label (str): The name of the label. e.g. "identity(%)"
top_group_dif_threshold (float): Threshold of top group that based on distribution.
top_group_percent (float): Threshold of percent of top group that based on distribution.
max_value_threshold_list (List): Threshold of label for each category (Category of max Quality).
Returns:
pd.DataFrame: DataFrame of category.
"""
label_extract_percent = label[: -3]
# Categorize by distribution of the label
dist_category = template_df.groupby('target').apply(
lambda x: categorize_by_label_distribution(x,
label=label,
dif_threshold=top_group_dif_threshold,
top_percent=top_group_percent))
dist_category_df = pd.DataFrame(dist_category).rename({0: label_extract_percent + '_dist_category'}, axis=1)
# if label related to 'coverage', return only dist category
if 'coverage' in label:
return dist_category_df
# Categorize by maximum value of the label
quality_category = template_df.groupby('target').apply(
lambda x: categorize_by_max_quality_template(x, label, threshold_list=max_value_threshold_list))
quality_category_df = pd.DataFrame(quality_category).rename(
{0: label_extract_percent + '_quality_category'}, axis=1)
return pd.concat([dist_category_df, quality_category_df], axis=1)
def categorize_target(tmscore_df: pd.DataFrame) -> pd.DataFrame:
"""Categorize target by alignment quality of template (identity, positive, and coverage)
Args:
tmscore_df (pd.DataFrame): DataFrame of tmscore.
Returns:
pd.DataFrame: DataFrame of categorized target.
"""
alignment_quality_row_columns = ['identity', 'positive', 'coverage']
# alignment_quality_row_columns = ['identity', 'positive', 'coverage',
# 'identity(-misres)', 'positive(-misres)', 'coverage(-misres)']
alignment_quality_columns = [c + '(%)' for c in alignment_quality_row_columns]
for c in alignment_quality_row_columns:
tmscore_df[c + '(%)'] = tmscore_df[c] / tmscore_df['seq_len']
template_df = tmscore_df.groupby(['target', 'template']).head(1).drop(['GDT_TS', 'GDT_HA'], axis=1)
# Categorize by each label
labels = alignment_quality_columns
category_df_list = []
for label in labels:
if 'identity' in label:
max_value_threshold_list = [0.4, 0.6, 0.8]
elif 'positive' in label:
max_value_threshold_list = [0.6, 0.8]
else:
max_value_threshold_list = [0.9, 0.95]
label_category_df = categorize_by_label(template_df, label,
max_value_threshold_list=max_value_threshold_list)
category_df_list.append(label_category_df)
category_df = pd.concat(category_df_list, axis=1)
return category_df
def get_target_stat(target_df, label='GDT_TS'):
d = {'Num models': len(target_df), 'mean ' + label: target_df[label].mean(), 'max ' + label: target_df[label].max(),
'med ' + label: target_df[label].median(), 'min ' + label: target_df[label].min(),
'var ' + label: np.var(target_df[label]), 'std ' + label: np.std(target_df[label])}
return pd.Series(d)
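# Illustrative usage (column names as used elsewhere in this script): per-target
# GDT_TS summary statistics can be collected with a groupby-apply.
def _example_target_stats(tmscore_df: pd.DataFrame) -> pd.DataFrame:
    return tmscore_df.groupby('target').apply(get_target_stat, label='GDT_TS')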
def main():
parser = argparse.ArgumentParser()
parser.add_argument('tmscore', type=str, help='path to the csv of tmscore')
args = parser.parse_args()
tmscore_path = Path(args.tmscore)
tmscore_df = pd.read_csv(tmscore_path, index_col=0)
# create category
category_df = categorize_target(tmscore_df)
print(category_df)
# target info
need_columns = ['target', 'seq_len', 'Class', 'Domain_num']
drop_columns = set(tmscore_df.columns) - set(need_columns)
target_df = tmscore_df.groupby('target').head(1).drop(drop_columns, axis=1)
target_cdf = pd.merge(target_df, category_df, on='target')
import pathlib
import requests
import pandas as pd
from bs4 import BeautifulSoup
class Brasileiro:
def __init__(self, year: int, series: str) -> None:
if year < 2012:
raise ValueError('year must be greater than 2012')
elif series.lower() not in ['a', 'b']:
raise ValueError("series must be 'A' or 'B'")
self.year = year
self.series = series.lower()
self._df = pd.DataFrame()
import pandas as pd
import numpy as np
import re
import random
import ast
import warnings
import itertools
import time
from rdkit import Chem, rdBase
from rdkit.Chem import AllChem
from rdkit import RDLogger
from rdkit.Chem import Descriptors
from ast import literal_eval as leval
from copy import deepcopy
from tqdm import tqdm
import casadi as cas
from casadi import SX,integrator,vertcat
tqdm.pandas()
lg = RDLogger.logger()
lg.setLevel(RDLogger.ERROR)
class PolyMaker():
def __init__ (self):
self.smiles_req = {'ols':'[C,c;!$(C=O)][OH]',
'aliphatic_ols':'[C;!$(C=O);!$([a])][OH]',
'acids':'[#6][#6](=[#8:4])([F,Cl,Br,I,#8H,O-])',
'prime_amines':'[#6;!$(C=O)][NH2;!$([NH2+])]',
'carbonates':'[O]=[C]([F,Cl,Br,I,O])([F,Cl,Br,I,O])',
'acidanhydrides':'[#8]([#6](=[#8]))([#6](=[#8]))',
'prime_thiols':'[#6;!$(C=O)][SH]'}
self.reactions = { 'ester':
{'diols_acids':'[C;!$(C=O);!$([a]):6][OH:1].[#6:2][#6:3](=[O:4])([F,Cl,Br,I,#8H,O-:5])>>'
'[C:6][O:1][#6:3](=[O:4])([#6:2])',
'diacids_ols':'[#6:2][#6:3](=[O:4])([F,Cl,Br,I,#8H,O-:5]).[C;!$(C=O);!$([a]):6][OH:1]>>'
'[C:6][O:1][#6:3](=[O:4])([#6:2])',
'infinite_chain':'([C;!$(C=O);!$([a]):1][OH:2].[#6:3][#6:4](=[O:5])([F,Cl,Br,I,OH,O-:6]))>>'
'[*:1][*:2][*:4](=[*:5])[*:3]'},
'amide':
{'diamines_acids':'[#6;!$(C=O):0][NH2;!$([NH2+]):1].[#6:2][#6:3](=[O:4])([#8H,O-:5])>>'
'[#6:0][NH:1][#6:3](=[O:4])([#6:2])',
'diacids_amines':'[#6:2][#6:3](=[O:4])([#8H,O-:5]).[#6;!$(C=O):0][NH2;!$([NH2+]):1]>>'
'[#6:0][NH:1][#6:3](=[O:4])([#6:2])',
'infinite_chain':'([#6;!$(C=O):1][NH2;!$([NH2+]):2].[#6:3][#6:4](=[O:5])([#8H,O-:6]))>>'
'[*:1][*:2][*:4](=[*:5])[*:3]'},
'carbonate':{
'phosgene':{'diols_carbonates':'[C,c;!$(C=O):0][OH:1].[O:2]=[C:3]([F,Cl,Br,I,O:4])([F,Cl,Br,I:5])>>'
'[O:2]=[C:3]([O:1][C,c:0])[X:4]',
'carbonates_diols':'[O:2]=[C:3]([F,Cl,Br,I,O:4])([F,Cl,Br,I:5]).[C,c;!$(C=O):0][OH:1]>>'
'[O:2]=[C:3]([O:1][C,c:0])[X:4]',
'infinite_chain':'([C,c;!$(C=O):0][OH:1].[O:2]=[C:3]([F,Cl,Br,I,O:4])([F,Cl,Br,I:5]))>>'
'[O:2]=[C:3]([O:4])([O:1][C,c:0])'},
'nonphosgene':{'diols_carbonates':'[C,c;!$(C=O):0][OH:1].[O:2]=[C:3]([O:4][C,c:6])([O:5][C,c])>>'
'[O:2]=[C:3]([O:1][C,c:0])[O:4][C,c:6]',
'carbonates_diols':'[O:2]=[C:3]([O:4][C,c:6])([O:5][C,c]).[C,c;!$(C=O):0][OH:1]>>'
'[O:2]=[C:3]([O:1][C,c:0])[O:4][C,c:6]',
'infinite_chain':'([C,c;!$(C=O):0][OH:1].[O:2]=[C:3]([O:4][C,c:6])([O:5][C,c]))>>'
'[O:2]=[C:3]([O:1][C,c:0])[O:4][C,c:6]'}},
'imide':
{'diacidanhydrides_amines':'[#8:3]([#6:4](=[#8:5]))([#6:6](=[#8:7])).[#6;!$(C=O):0][NH2:1]>>'
'[#6:0][N:1]([#6:4](=[#8:5]))([#6:6](=[#8:7]))',
'diamines_acidanhydrides':'[#6;!$(C=O):0][NH2:1].[#8:3]([#6:4](=[#8:5]))([#6:6](=[#8:7]))>>'
'[#6:0][N:1]([#6:4](=[#8:5]))([#6:6](=[#8:7]))',
'infinite_chain':'([#8:3]([#6:4](=[#8:5]))([#6:6](=[#8:7])).[#6;!$(C=O):0][NH2:1])>>'
'[#6:0][N:1]([#6:4](=[#8:5]))([#6:6](=[#8:7]))'},
'open_acidanhydrides':
{'add_OH':'[#8:3]([#6:4](=[#8:5]))([#6:6](=[#8:7]))>>'
'[#8:3]([#6:4](=[#8:5])(O))([#6:6](=[#8:7]))'}
}
self.__verison__ = '0.1.3.2'
def checksmile(self,s):
'''checks to make sure monomer is readable by rdkit and
returns the canonical SMILES
Input: string
Returns: string
'''
rdBase.DisableLog('rdApp.error')
try:
mol = Chem.MolFromSmiles(s)
mol = Chem.MolToSmiles(mol)
except:
mol = ''
rdBase.EnableLog('rdApp.error')
return mol
def get_monomers(self,s,stereochemistry=False):
'''parses a string into a list of monomers
the string is separated by '.' and each monomer is checked
for validity
Input: string
Returns: tuple of strings
'''
try:s=ast.literal_eval(s)
except:pass
if type(s)==str:
s = s.split('.')
if not stereochemistry:s = [s_i.replace('/','').replace('@','') for s_i in s]
monomers = tuple([self.checksmile(s_i) for s_i in s])
if np.any(np.array(monomers) == ''): monomers = None
if type(s)==tuple:
monomers=s
return monomers
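# Illustrative usage (hypothetical monomers): given a '.'-separated SMILES string,
# get_monomers returns a tuple of canonicalized monomer SMILES, e.g.
#   pm = PolyMaker()
#   pm.get_monomers('OCCO.OC(=O)CCCCC(=O)O')
#   # -> ('OCCO', 'O=C(O)CCCCC(=O)O')  (exact canonical forms depend on the RDKit version)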
def thermoset(self,reactants,mechanism,crosslinker=[],distribution=[],DP=10,replicate_structures=1,verbose=True):
''' Inputs:
reactants: contains smiles strings for reactants used in the polymer for both backbone and crosslinks
a tuple
or a strings of monomers
or a pandas dataframe containing a list of monomers as strings with column title 'monomers'
crosslinker: a list of 0's and 1's
each value will correspond to the monomers in reactants
0's will indicate the corresponding monomer is part of the backbone
1's will indicate the corresponding monomer is part of the crosslink
a list of integers
or a column in dataframe that is named 'crosslinker'
example: [0,0,0,1]
distribution: number of mols for each monomer in the reaction. values should be in the same order as reactants
list of floats
or column in dataframe that is named 'mols'
example: [10,10,3,1]
DP: degree of polymerization which is the number of monomer units in the polymer
an integer, if an integer the same DP will be used for the backbone and the crosslinks
a tuple, will contain only 2 values, the first value will be for the backbone and the second
for the crosslinks
mechanism: one of the following strings,
upe: unsaturated polyester, backbone will be a polyester with unsaturated bonds, crosslinks will be vinyls, olefins, acrylates
replicate_structures: integer, number of replicate structures which will be generated
Returns:
polymer: string
# '''
returnpoly = pd.DataFrame()
# convert monomers to tuple if reactants is a dataframe
if type(reactants)==pd.DataFrame:
try: reactants.loc[:,'monomers'] = reactants.apply(lambda row: self.get_monomers(row.monomers),axis=1)
except:pass
for rep in range(0,replicate_structures):
returnpoly_i = pd.DataFrame()
# reactants, crosslinks, etc. should be tuples but are passed as strings into the polymerization methods
# this puts everything into a dataframe before generating structures
# fix reactants and build the dataframe
if type(reactants)==pd.DataFrame:
returnpoly_i = reactants
if 'mechanism' not in reactants.columns: returnpoly_i.loc[:,'mechanism'] = mechanism
returnpoly_i.loc[:,'replicate_structure']=rep
returnpoly_i.loc[:,'monomers'] = returnpoly_i.monomers.astype(str)
returnpoly_i.loc[:,'mechanism'] = mechanism
elif type(reactants)==str:
try:
reactants_i = ast.literal_eval(reactants)
except:
reactants_i = self.get_monomers(reactants)
returnpoly_i.loc[:,'monomers']=pd.Series(str(reactants_i))
returnpoly_i.loc[:,'distribution']=pd.Series(str(distribution))
returnpoly_i.loc[:,'crosslinker']=pd.Series(str(crosslinker))
returnpoly_i.loc[:,'replicate_structure']=rep
returnpoly_i.loc[:,'monomers'] = returnpoly_i.monomers.astype(str)
returnpoly_i.loc[:,'mechanism'] = mechanism
elif type(reactants)==tuple:
returnpoly_i.loc[:,'monomers']=pd.Series(str(reactants))
returnpoly_i.loc[:,'distribution']=pd.Series(str(distribution))
returnpoly_i.loc[:,'crosslinker']=pd.Series(str(crosslinker))
returnpoly_i.loc[:,'replicate_structure']=rep
returnpoly_i.loc[:,'monomers'] = returnpoly_i.monomers.astype(str)
returnpoly_i.loc[:,'mechanism'] = mechanism
else:
raise ValueError('Data type not recognized')
#building dataframe
returnpoly = pd.concat([returnpoly,returnpoly_i])
# build polymers
if verbose:
returnpoly[['polymer','mechanism']] = returnpoly.progress_apply(
lambda row:
self.__polymerizemechanism_thermoset(
leval(row.monomers),
row.mechanism,
leval(row.crosslinker),
leval(row.distribution),
DP),
axis=1)
else:
returnpoly[['polymer','mechanism']] = returnpoly.apply(
lambda row:
self.__polymerizemechanism_thermoset(
leval(row.monomers),
row.mechanism,
leval(row.crosslinker),
leval(row.distribution),
DP),
axis=1)
returnpoly = returnpoly.sort_index().sort_values('replicate_structure')
# BUILD STRUCTURE
return returnpoly
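# Illustrative usage sketch (monomer choices and ratios are assumptions, not taken
# from the original sources): an unsaturated polyester thermoset built from a diol,
# a saturated diacid, an unsaturated diacid and a vinyl crosslinker, e.g.
#   pm = PolyMaker()
#   pm.thermoset(reactants=('OCCO', 'O=C(O)CCCCC(=O)O', 'O=C(O)C=CC(=O)O', 'C=Cc1ccccc1'),
#                mechanism='upe',
#                crosslinker=[0, 0, 0, 1],
#                distribution=[10, 7, 3, 1],
#                DP=(10, 3))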
def thermoplastic(self,reactants,DP=2,mechanism='',replicate_structures=1,distribution=[],pm=None,infinite_chain=False,verbose=True):
'''Polymerization method for building thermoplastics
Inputs:
reactants: a tuple
or a strings of monomers
or a pandas dataframe containing a list of monomers as strings with column title monomers
DP: integer, degree of polymerization which is the number of monomer units in the polymer
mechanism: string,
vinyl: performs polymerization along vinyl groups
ester: performs condensation reaction on dicarboxylic acid + diol
ester_stereo: performs condensation reaction on dicarboxylic acid + diol where stereoregularity is also specified
amide: performs condensation reaction on dicarboxylic acid + diamine
carbonate: performs condensation reaction on carbonate + diol
replicate_structures: integer, number of replicate structures which will be generated
Returns:
polymer: dataframe
'''
returnpoly = pd.DataFrame()
for rep in range(0,replicate_structures):
returnpoly_i = pd.DataFrame()
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from pandas.testing import assert_series_equal
from sid.config import INDEX_NAMES
from sid.update_states import _kill_people_over_icu_limit
from sid.update_states import _update_immunity_level
from sid.update_states import _update_info_on_new_tests
from sid.update_states import _update_info_on_new_vaccinations
from sid.update_states import compute_waning_immunity
from sid.update_states import update_derived_state_variables
@pytest.mark.unit
def test_kill_people_over_icu_limit_not_binding():
states = pd.DataFrame({"needs_icu": [False] * 5 + [True] * 5, "cd_dead_true": -1})
params = pd.DataFrame(
{
"category": ["health_system"],
"subcategory": ["icu_limit_relative"],
"name": ["icu_limit_relative"],
"value": [50_000],
}
).set_index(INDEX_NAMES)
result = _kill_people_over_icu_limit(states, params, 0)
assert result["cd_dead_true"].eq(-1).all()
@pytest.mark.unit
@pytest.mark.parametrize("n_dead", range(6))
def test_kill_people_over_icu_limit_binding(n_dead):
states = pd.DataFrame(
{
"needs_icu": [False] * (5 - n_dead) + [True] * (5 + n_dead),
"cd_dead_true": -1,
}
)
params = pd.DataFrame(
{
"category": ["health_system"],
"subcategory": ["icu_limit_relative"],
"name": ["icu_limit_relative"],
"value": [50_000],
}
).set_index(INDEX_NAMES)
result = _kill_people_over_icu_limit(states, params, 0)
expected = [10 - n_dead, n_dead] if n_dead != 0 else [10]
assert (result["cd_dead_true"].value_counts() == expected).all()
@pytest.mark.unit
def test_update_info_on_new_tests():
"""Test that info on tests is correctly update.
The tests assume three people: 1. A generic case, 2. someone who will receive a
test, 3. someone who receives a positive test result, 4. someone who receives a
negative test result.
"""
states = pd.DataFrame(
{
"pending_test_date": pd.to_datetime([None, "2020-01-01", None, None]),
"cd_received_test_result_true": [-1, -1, 0, 0],
"cd_received_test_result_true_draws": [3, 3, 3, 3],
"received_test_result": [False, False, True, True],
"new_known_case": False,
"immunity": [0.0, 0.0, 1.0, 0.0],
"knows_immune": False,
"symptomatic": [False, False, False, False],
"infectious": [False, False, True, False],
"knows_infectious": False,
"cd_knows_infectious_false": -1,
"cd_infectious_false": [-1, -1, 5, -1],
}
)
to_be_processed_tests = pd.Series([False, True, False, False])
result = _update_info_on_new_tests(states, to_be_processed_tests)
expected = pd.DataFrame(
{
"pending_test_date": pd.to_datetime([None, None, None, None]),
"cd_received_test_result_true": [-1, 3, 0, 0],
"cd_received_test_result_true_draws": [3, 3, 3, 3],
"received_test_result": [False, False, False, False],
"new_known_case": [False, False, True, False],
"immunity": [0.0, 0.0, 1.0, 0.0],
"knows_immune": [False, False, True, False],
"symptomatic": [False, False, False, False],
"infectious": [False, False, True, False],
"knows_infectious": [False, False, True, False],
"cd_knows_infectious_false": [-1, -1, 5, -1],
"cd_infectious_false": [-1, -1, 5, -1],
}
)
assert result.equals(expected)
@pytest.mark.unit
def test_update_info_on_new_vaccinations():
states = pd.DataFrame(
{
"newly_vaccinated": [False, False, False, False],
"ever_vaccinated": [False, False, False, True],
"cd_ever_vaccinated": [-9999, -9999, -9999, -10],
}
)
newly_vaccinated = pd.Series([False, False, True, False])
result = _update_info_on_new_vaccinations(states, newly_vaccinated)
expected = pd.DataFrame(
{
"newly_vaccinated": [False, False, True, False],
"ever_vaccinated": [False, False, True, True],
"cd_ever_vaccinated": [-9999, -9999, 0, -10],
}
)
assert result.equals(expected)
@pytest.mark.unit
def test_update_derived_state_variables():
states = pd.DataFrame()
states["a"] = np.arange(5)
derived_state_variables = {"b": "a <= 3"}
calculated = update_derived_state_variables(states, derived_state_variables)["b"]
expected = pd.Series([True, True, True, True, False], name="b")
assert_series_equal(calculated, expected)
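# Illustrative extension (assumed to behave like the case above): derived state
# variables are pandas-eval style expressions, so several can be computed at once.
def _example_update_multiple_derived_state_variables():
    states = pd.DataFrame({"a": np.arange(5)})
    derived = {"b": "a <= 3", "c": "a == 0"}
    out = update_derived_state_variables(states, derived)
    return out[["b", "c"]]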
# python 2
try:
from urllib.request import Request, urlopen
# Python 3
except ImportError:
from urllib2 import Request, urlopen
import pandas as pd
import time
import datetime
import numpy as np
import re
import json
from bs4 import BeautifulSoup
from pytrends.request import TrendReq
class Cryptory():
def __init__(self, from_date, to_date=None, ascending=False,
fillgaps=True, timeout=10.0):
"""Initialise cryptory class
Parameters
----------
from_date : the starting date (as string) for the returned data;
required format is %Y-%m-%d (e.g. "2017-06-21")
to_date : the end date (as string) for the returned data;
required format is %Y-%m-%d (e.g. "2017-06-21")
Optional. If unspecified, it will default to the current day
ascending : binary. Determines whether the returned dataframes are
ordered by date in ascending or descending order
(defaults to False i.e. most recent first)
fillgaps : binary. When data does not exist (e.g. weekends for stocks)
should the rows be filled in with the previous available data
(defaults to True e.g. Saturday stock price will be same as Friday)
timeout : float. The max time allowed (in seconds) to pull data from a website.
If exceeded, a timeout error is returned. Default is 10 seconds.
"""
self.from_date = from_date
# if to_date provided, defaults to current date
if to_date is None:
self.to_date = datetime.date.today().strftime("%Y-%m-%d")
else:
self.to_date = to_date
self.ascending = ascending
self.fillgaps = fillgaps
self.timeout = timeout
self._df = pd.DataFrame({'date':pd.date_range(start=self.from_date, end=self.to_date)})
def extract_reddit_metrics(self, subreddit, metric, col_label="", sub_col=False):
"""Retrieve daily subscriber data for a specific subreddit scraped from redditmetrics.com
Parameters
----------
subreddit : the name of subreddit (e.g. "python", "learnpython")
metric : the particular subscriber information to be retrieved
(options are limited to "subscriber-growth" (daily change),
'total-subscribers' (total subscribers on a given day) and
'rankData' (the position of the subreddit on reddit overall)
'subscriber-growth-perc' (daily percentage change in subscribers))
col_label : specify the title of the value column
(it will default to the metric name with hyphens replacing underscores)
sub_col : whether to include the subreddit name as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
if metric not in ['subscriber-growth', 'total-subscribers', 'rankData', 'subscriber-growth-perc']:
raise ValueError(
"Invalid metric: must be one of 'subscriber-growth', " +
"'total-subscribers', 'subscriber-growth-perc', 'rankData'")
url = "http://redditmetrics.com/r/" + subreddit
if metric == 'subscriber-growth-perc':
metric_name = 'total-subscribers'
else:
metric_name = metric
try:
parsed_page = urlopen(url, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
if metric == 'rankData':
start_segment = parsed_page.find(metric)
else:
start_segment = parsed_page.find("element: '"+metric_name+"'")
if start_segment != -1:
start_list = parsed_page.find("[", start_segment)
end_list = parsed_page.find("]", start_list)
parsed_page = parsed_page[start_list:end_list + 1]
else:
return pd.DataFrame({"error":"Could not find that subreddit"}, index=[0])
parsed_page = parsed_page.replace("'", '"')
parsed_page = parsed_page.replace('a', '\"subscriber_count\"')
parsed_page = parsed_page.replace('y', '\"date\"')
output = json.loads(parsed_page)
output = pd.DataFrame(output)
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import pytest
import numpy as np
import numpy.ma as ma
import pandas as pd
import scipy as sp
import math
from itertools import repeat, chain
from ..bin import *
from ..bin import _process_column_initial, _encode_categorical_existing, _process_continuous
class StringHolder:
def __init__(self, internal_str):
self.internal_str = internal_str
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedStringHolder(StringHolder):
def __init__(self, internal_str):
StringHolder.__init__(self, internal_str)
class FloatHolder:
def __init__(self, internal_float):
self.internal_float = internal_float
def __float__(self):
return self.internal_float
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatHolder(FloatHolder):
def __init__(self, internal_float):
FloatHolder.__init__(self, internal_float)
class FloatAndStringHolder:
def __init__(self, internal_float, internal_str):
self.internal_float = internal_float
self.internal_str = internal_str
def __float__(self):
return self.internal_float
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatAndStringHolder(FloatAndStringHolder):
def __init__(self, internal_float, internal_str):
FloatAndStringHolder.__init__(self, internal_float, internal_str)
class NothingHolder:
# the result of calling str(..) includes the memory address, so they won't be dependable categories
def __init__(self, internal_str):
self.internal_str = internal_str
def check_pandas_normal(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val1, val2], dtype=np.object_), dtype=dtype)
feature_types_given = ['nominal']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None)], feature_names_in, None))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c1 = {str(val1) : 1, str(val2) : 2}
X_cols = list(unify_columns(X, [(0, c1)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c2 = {str(val2) : 1, str(val1) : 2}
X_cols = list(unify_columns(X, [(0, c2)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
def check_pandas_missings(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
c1 = {str(val1) : 1, str(val2) : 2}
c2 = {str(val2) : 1, str(val1) : 2}
feature_types_given = ['nominal', 'nominal', 'nominal', 'nominal']
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None), (3, None)], feature_names_in, None))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(len(X_cols[1][2]) == 2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(len(X_cols[2][2]) == 2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(len(X_cols[3][2]) == 2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c1), (2, c1), (3, c1)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c1)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c1)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c2), (1, c2), (2, c2), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c2), (2, c1), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
def check_pandas_float(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, min_unique_continuous=0))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(X_cols[0][1][0] == np.float64(dtype(val2)))
assert(X_cols[0][1][1] == np.float64(dtype(val1)))
assert(X_cols[0][1][2] == np.float64(dtype(val1)))
assert(X_cols[1][0] == 'continuous')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is None)
assert(X_cols[1][1].dtype == np.float64)
assert(np.isnan(X_cols[1][1][0]))
assert(X_cols[1][1][1] == np.float64(dtype(val2)))
assert(X_cols[1][1][2] == np.float64(dtype(val1)))
assert(X_cols[2][0] == 'continuous')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is None)
assert(X_cols[2][1].dtype == np.float64)
assert(X_cols[2][1][0] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[2][1][1]))
assert(X_cols[2][1][2] == np.float64(dtype(val2)))
assert(X_cols[3][0] == 'continuous')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is None)
assert(X_cols[3][1].dtype == np.float64)
assert(X_cols[3][1][0] == np.float64(dtype(val2)))
assert(X_cols[3][1][1] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[3][1][2]))
def check_numpy_throws(dtype_src, val1, val2):
X = np.array([[val1, val2], [val1, val2]], dtype=dtype_src)
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
try:
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_process_continuous_float64():
vals, bad = _process_continuous(np.array([3.5, 4.5], dtype=np.float64), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([3.5, 4.5], dtype=np.float64)))
def test_process_continuous_float32():
vals, bad = _process_continuous(np.array([3.1, np.nan], dtype=np.float32), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 3.0999999046325684)
assert(np.isnan(vals[1]))
def test_process_continuous_int8():
vals, bad = _process_continuous(np.array([7, -9], dtype=np.int8), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([7, -9], dtype=np.float64)))
def test_process_continuous_uint16_missing():
vals, bad = _process_continuous(np.array([7], dtype=np.uint16), np.array([True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 7)
assert(np.isnan(vals[1]))
def test_process_continuous_bool():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([0, 1], dtype=np.float64)))
def test_process_continuous_bool_missing():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), np.array([True, False, True], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 0)
assert(np.isnan(vals[1]))
assert(vals[2] == 1)
def test_process_continuous_obj_simple():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5], dtype=np.float64)))
def test_process_continuous_obj_simple_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), np.array([True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 6)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(np.isnan(vals[5]))
def test_process_continuous_obj_hard():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), DerivedStringHolder("7.5"), FloatHolder(8.5), DerivedFloatHolder(9.5), FloatAndStringHolder(10.5, "88"), DerivedFloatAndStringHolder(11.5, "99")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5], dtype=np.float64)))
def test_process_continuous_obj_hard_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5")], dtype=np.object_), np.array([True, True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 7)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[6]))
def test_process_continuous_obj_hard_bad():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), "bad", StringHolder("bad2"), NothingHolder("bad3")], dtype=np.object_), np.array([True, True, True, True, True, True, True, False, True, True], dtype=np.bool_))
assert(len(bad) == 10)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] is None)
assert(bad[3] is None)
assert(bad[4] is None)
assert(bad[5] is None)
assert(bad[6] == "bad")
assert(bad[7] is None)
assert(bad[8] == "bad2")
assert(isinstance(bad[9], str))
assert(vals.dtype == np.float64)
assert(len(vals) == 10)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[7]))
def test_process_continuous_str_simple():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5], dtype=np.float64)))
def test_process_continuous_str_simple_missing():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), np.array([True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[2]))
def test_process_continuous_str_hard_bad():
vals, bad = _process_continuous(np.array(["1", "2.5", "bad"], dtype=np.unicode_), np.array([True, True, True, False], dtype=np.bool_))
assert(len(bad) == 4)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] == "bad")
assert(bad[3] is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 4)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[3]))
def test_process_column_initial_int_float():
# this test is hard since np.unique seems to think int(4) == float(4.0) so naively it returns just "4"
encoded, c = _process_column_initial(np.array([4, 4.0], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["4"] == 1)
assert(c["4.0"] == 2)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
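# Illustrative aside (not part of the original suite): a minimal sketch of why the test above
# is "hard" -- np.unique compares by value, so int(4) and float(4.0) collapse to one entry in
# an object array, while their string forms "4" and "4.0" stay distinct.
def _demo_unique_int_float_collapse():
    import numpy as np
    mixed = np.array([4, 4.0], dtype=np.object_)
    assert len(np.unique(mixed)) == 1          # 4 == 4.0, so np.unique sees a single value
    assert len({str(v) for v in mixed}) == 2   # "4" vs "4.0" remain separate as strings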
def test_process_column_initial_float32_float64():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 version has the lower mantissa bits
    # all set to zero, and there will be another float64 that is closer to "0.1" in float64 representation, so
    # they aren't the same, but if we convert them to strings first then they are identical. Strings are the
    # ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
    # will tend to separate the float32 and the float64 values since they aren't the same, but would then serialize
    # them to the same string. Then our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
encoded, c = _process_column_initial(np.array([np.float32(0.1), np.float64(0.1)], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["0.1"] == 1)
assert(c["0.10000000149011612"] == 2)
assert(np.array_equal(encoded, np.array([c["0.10000000149011612"], c["0.1"]], dtype=np.int64)))
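# Illustrative aside (not part of the original suite): the widening behaviour the comment above
# describes, shown directly. With modern numpy printing, float32(0.1) serializes to "0.1" while
# its float64-widened value exposes the extra mantissa bits.
def _demo_float32_widening_strings():
    import numpy as np
    assert np.float64(np.float32(0.1)) != np.float64(0.1)
    assert str(np.float32(0.1)) == "0.1"
    assert str(np.float64(np.float32(0.1))) == "0.10000000149011612"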
def test_process_column_initial_obj_obj():
encoded, c = _process_column_initial(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["def"] == 2)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_float64_nomissing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), None, 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], c["11.1"]], dtype=np.int64)))
def test_process_column_initial_float64_missing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], 0, c["11.1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str():
c = {"cd": 1, "ab": 2}
encoded, bad = _encode_categorical_existing(np.array(["ab", "cd"], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["ab"], c["cd"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_bool():
c = {"True": 1, "False": 2}
encoded, bad = _encode_categorical_existing(np.array([True, False], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["True"], c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_small():
c = {"-2": 1, "3": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64(3)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["3"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_big():
c = {"-2": 1, "18446744073709551615": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64("18446744073709551615")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["18446744073709551615"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_floats():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 version has the lower mantissa bits
    # all set to zero, and there will be another float64 that is closer to "0.1" in float64 representation, so
    # they aren't the same, but if we convert them to strings first then they are identical. Strings are the
    # ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
    # will tend to separate the float32 and the float64 values since they aren't the same, but would then serialize
    # them to the same string. Then our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
c = {"1.1": 1, "2.19921875": 2, "3.299999952316284": 3, "4.4": 4, "5.5": 5}
encoded, bad = _encode_categorical_existing(np.array([float(1.1), np.float16(2.2), np.float32(3.3), np.float64(4.4), np.longfloat(5.5)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1.1"], c["2.19921875"], c["3.299999952316284"], c["4.4"], c["5.5"]], dtype=np.int64)))
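# Illustrative aside (not part of the original suite): the category keys above are just str()
# of each value after widening to float64, which is where "2.19921875" (float16 of 2.2) and
# "3.299999952316284" (float32 of 3.3) come from.
def _demo_float_category_keys():
    import numpy as np
    assert str(np.float64(np.float16(2.2))) == "2.19921875"
    assert str(np.float64(np.float32(3.3))) == "3.299999952316284"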
def test_encode_categorical_existing_obj_str_int():
c = {"abc": 1, "1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", int(1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", float(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float64():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float64(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float32():
c = {"abc": 1, "1.100000023841858": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float32(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.100000023841858"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float():
# this test is hard since np.unique seems to think int(4) == float(4) so naively it returns just "4"
c = {"4": 1, "4.0": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), 4.0], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float32():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32 version has the lower mantissa
    # bits all set to zero, and there will be another float64 that is closer to "0.1" for float64s, so
    # they aren't the same, but if we convert them to strings first then they are identical. I tend to think
    # of strings as the ultimate arbiter of categorical membership since strings are cross-platform.
    # np.unique will tend to separate the float32 and the float64 values since they aren't the same, but would then
    # serialize them to the same string. Then our model would have ["0.1", "0.1"] as the categories!!
c = {"4": 1, "0.10000000149011612": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), np.float32(0.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["0.10000000149011612"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_obj():
c = {"abc": 1, "def": 2}
encoded, bad = _encode_categorical_existing(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_encode_categorical_existing_str():
c = {"abc": 1, "def": 2, "ghi": 3}
encoded, bad = _encode_categorical_existing(np.array(["abc", "ghi", "def", "something"], dtype=np.unicode_), np.array([True, True, False, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, "something"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["abc"], c["ghi"], 0, c["def"], -1], dtype=np.int64)))
def test_encode_categorical_existing_int8():
c = {"5": 1, "0": 2, "-9": 3}
encoded, bad = _encode_categorical_existing(np.array([5, -9, 0, 0, -9, 5, 99], dtype=np.int8), np.array([True, True, True, False, True, True, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, None, None, None, "99"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["5"], c["-9"], c["0"], 0, c["0"], c["-9"], c["5"], -1], dtype=np.int64)))
def test_encode_categorical_existing_bool():
c = {"False": 1, "True": 2}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["False"], c["True"], 0, c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_bool_true():
c = {"True": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array(["False", None, None, "False"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([-1, c["True"], 0, -1], dtype=np.int64)))
def test_encode_categorical_existing_bool_false():
c = {"False": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, "True", None, None], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["False"], -1, 0, c["False"]], dtype=np.int64)))
def test_process_column_initial_choose_floatcategories():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2")], dtype=np.object_), None, None, 4)
assert(c["2.2"] == 1)
assert(c["2.200000047683716"] == 2)
assert(c["11.11"] == 3)
assert(np.array_equal(encoded, np.array([c["11.11"], c["2.2"], c["2.200000047683716"], c["2.2"], c["2.2"]], dtype=np.int64)))
def test_process_column_initial_choose_floats():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2"), 3.3, 3.3], dtype=np.object_), None, None, 3)
assert(c is None)
assert(np.array_equal(encoded, np.array([11.11, 2.2, 2.200000047683716, 2.2, 2.2, 3.3, 3.3], dtype=np.float64)))
def test_unify_columns_numpy1():
X = np.array([1, 2, 3])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"]], dtype=np.int64)))
def test_unify_columns_numpy2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_numpy_ignore():
X = np.array([["abc", None, "def"], ["ghi", "jkl", None]])
feature_types_given=['ignore', 'ignore', 'ignore']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, feature_types_given))
assert(3 == len(X_cols))
assert(X_cols[0][0] == 'ignore')
assert(X_cols[0][2] is None)
assert(X_cols[0][1] is None)
assert(np.array_equal(X_cols[0][3], np.array(["abc", "ghi"], dtype=np.object_)))
assert(X_cols[1][0] == 'ignore')
assert(X_cols[1][2] is None)
assert(X_cols[1][1] is None)
assert(np.array_equal(X_cols[1][3], np.array([None, "jkl"], dtype=np.object_)))
assert(X_cols[2][0] == 'ignore')
assert(X_cols[2][2] is None)
assert(X_cols[2][1] is None)
assert(np.array_equal(X_cols[2][3], np.array(["def", None], dtype=np.object_)))
def test_unify_columns_scipy():
X = sp.sparse.csc_matrix([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_dict1():
X = {"feature1" : [1], "feature2" : "hi", "feature3" : None}
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == 0)
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["hi"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["1"])
def test_unify_columns_dict2():
X = {"feature1" : [1, 4], "feature2" : [2, 5], "feature3" : [3, 6]}
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["3"], X_cols[0][2]["6"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["1"], X_cols[2][2]["4"]], dtype=np.int64)))
def test_unify_columns_list1():
X = [1, 2.0, "hi", None]
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_list2():
P1 = pd.DataFrame()
P1["feature1"] = pd.Series(np.array([1, None, np.nan], dtype=np.object_))
P2 = pd.DataFrame()
P2["feature1"] = pd.Series(np.array([1], dtype=np.float32))
P2["feature2"] = pd.Series(np.array([None], dtype=np.object_))
P2["feature3"] = pd.Series(np.array([np.nan], dtype=np.object_))
S1 = sp.sparse.csc_matrix([[1, 2, 3]])
S2 = sp.sparse.csc_matrix([[1], [2], [3]])
X = [np.array([1, 2, 3], dtype=np.int8), pd.Series([4.0, None, np.nan]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_), np.array([[1, 2, 3]], dtype=np.int8), np.array([[1], [2], [3]], dtype=np.int8), P1, P2, S1, S2]
X, n_samples = clean_X(X)
assert(n_samples == 16)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4.0"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1.0"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], 0, c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"], c["2"], c["2"], 0, 0, c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], 0, c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"], c["3"], c["3"], 0, 0, c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_tuple1():
X = (1, 2.0, "hi", None)
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_tuple2():
X = (np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_generator1():
X = (x for x in [1, 2.0, "hi", None])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_generator2():
X = (x for x in [np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_)])
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_pandas_normal_int8():
check_pandas_normal(np.int8, -128, 127)
def test_unify_columns_pandas_normal_uint8():
check_pandas_normal(np.uint8, 0, 255)
def test_unify_columns_pandas_normal_int16():
check_pandas_normal(np.int16, -32768, 32767)
def test_unify_columns_pandas_normal_uint16():
check_pandas_normal(np.uint16, 0, 65535)
def test_unify_columns_pandas_normal_int32():
check_pandas_normal(np.int32, -2147483648, 2147483647)
def test_unify_columns_pandas_normal_uint32():
check_pandas_normal(np.uint32, 0, 4294967295)
def test_unify_columns_pandas_normal_int64():
check_pandas_normal(np.int64, -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_normal_uint64():
check_pandas_normal(np.uint64, np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_normal_bool():
check_pandas_normal(np.bool_, False, True)
def test_unify_columns_pandas_missings_float64():
check_pandas_float(np.float64, -1.1, 2.2)
def test_unify_columns_pandas_missings_longfloat():
check_pandas_float(np.longfloat, -1.1, 2.2)
def test_unify_columns_pandas_missings_float32():
check_pandas_float(np.float32, -1.1, 2.2)
def test_unify_columns_pandas_missings_float16():
check_pandas_float(np.float16, -1.1, 2.2)
def test_unify_columns_pandas_missings_Int8Dtype():
check_pandas_missings(pd.Int8Dtype(), -128, 127)
def test_unify_columns_pandas_missings_UInt8Dtype():
check_pandas_missings(pd.UInt8Dtype(), 0, 255)
def test_unify_columns_pandas_missings_Int16Dtype():
check_pandas_missings(pd.Int16Dtype(), -32768, 32767)
def test_unify_columns_pandas_missings_UInt16Dtype():
check_pandas_missings(pd.UInt16Dtype(), 0, 65535)
def test_unify_columns_pandas_missings_Int32Dtype():
check_pandas_missings(pd.Int32Dtype(), -2147483648, 2147483647)
def test_unify_columns_pandas_missings_UInt32Dtype():
check_pandas_missings(pd.UInt32Dtype(), 0, 4294967295)
def test_unify_columns_pandas_missings_Int64Dtype():
check_pandas_missings(pd.Int64Dtype(), -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_missings_UInt64Dtype():
check_pandas_missings(pd.UInt64Dtype(), np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_missings_BooleanDtype():
check_pandas_missings(pd.BooleanDtype(), False, True)
def test_unify_columns_pandas_missings_str():
check_pandas_missings(np.object_, "abc", "def")
def test_unify_columns_pandas_missings_nice_str():
check_pandas_missings(np.object_, StringHolder("abc"), "def")
def test_unify_columns_pandas_missings_pure_ints():
check_pandas_missings(np.object_, 1, 2)
def test_unify_columns_pandas_missings_pure_floats():
check_pandas_missings(np.object_, 1.1, 2.2)
def test_unify_columns_pandas_missings_mixed_floats():
check_pandas_missings(np.object_, 1.1, "2.2")
def test_unify_columns_pandas_missings_mixed_floats2():
check_pandas_missings(np.object_, StringHolder("1.1"), "2.2")
def test_unify_columns_str_throw():
X = "abc"
try:
X, n_samples = clean_X(X)
assert(False)
except:
pass
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_int_throw():
X = 1
try:
X, n_samples = clean_X(X)
assert(False)
except:
pass
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_duplicate_colnames_throw():
X = pd.DataFrame()
X["0"] = [1, 2]
X[0] = [3, 4]
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_opaque_str_throw():
    # this should fail since the default string generator makes a string that is useless as a category, like:
# <interpret.glassbox.ebm.test.test_bin.NothingHolder object at 0x0000019525E9FE48>
check_numpy_throws(np.object_, NothingHolder("abc"), "def")
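# Illustrative aside (not part of the original suite): a class without __str__/__repr__ falls
# back to object.__repr__, which embeds a memory address, so it cannot act as a stable
# category string. The _Opaque class below is a hypothetical stand-in for NothingHolder.
def _demo_default_repr_is_unusable():
    class _Opaque:
        def __init__(self, v):
            self.v = v
    assert "_Opaque object at 0x" in str(_Opaque("abc"))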
def test_unify_columns_list_throw():
check_numpy_throws(np.object_, ["abc", "bcd"], "def")
def test_unify_columns_tuple_throw():
check_numpy_throws(np.object_, ("abc", "bcd"), "def")
def test_unify_columns_set_throw():
check_numpy_throws(np.object_, {"abc", "bcd"}, "def")
def test_unify_columns_dict_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}, "def")
def test_unify_columns_keys_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.keys(), "def")
def test_unify_columns_values_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.values(), "def")
def test_unify_columns_range_throw():
check_numpy_throws(np.object_, range(1, 2), "def")
def test_unify_columns_generator_throw():
check_numpy_throws(np.object_, (x for x in [1, 2]), "def")
def test_unify_columns_ndarray_throw():
check_numpy_throws(np.object_, np.array([1, "abc"], dtype=np.object_), "def")
def test_unify_columns_pandas_obj_to_float():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), np.float32("6.6").item()], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(np.isnan(X_cols[0][1][0]))
assert(np.isnan(X_cols[0][1][1]))
assert(np.isnan(X_cols[0][1][2]))
assert(X_cols[0][1][3] == 0)
assert(X_cols[0][1][4] == -1)
assert(X_cols[0][1][5] == 2.2)
assert(X_cols[0][1][6] == -3.3)
assert(X_cols[0][1][7] == 4.3984375)
assert(X_cols[0][1][8] == -5.5)
assert(X_cols[0][1][9] == 6.5999999046325684) # python internal objects are float64
def test_unify_columns_pandas_obj_to_str():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), 5.6843418860808014e-14, "None", "nan"], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 12)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
# For "5.684341886080802e-14", we need to round the 16th digit up for this to be the shortest string since
# "5.684341886080801e-14" doesn't work
# https://www.exploringbinary.com/the-shortest-decimal-string-that-round-trips-may-not-be-the-nearest/
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["0"], c["-1"], c["2.2"], c["-3.3"], c["4.3984375"], c["-5.5"], c["5.684341886080802e-14"], c["None"], c["nan"]], dtype=np.int64)))
assert(np.array_equal(na, X_cols[0][1] == 0))
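# Illustrative aside (not part of the original suite): str()/repr() of a Python float is the
# shortest string that round-trips, which is not always the nearest decimal -- hence the
# "...802e-14" key above rather than "...801e-14".
def _demo_shortest_roundtrip_float_string():
    x = 5.6843418860808014e-14
    assert str(x) == "5.684341886080802e-14"
    assert float(str(x)) == x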
def test_unify_columns_pandas_categorical():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
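# Illustrative aside (not part of the original suite): pandas silently maps values outside a
# CategoricalDtype's categories to NaN, which is why "not_in_categories" above is treated as
# missing rather than raising.
def _demo_out_of_category_becomes_nan():
    import pandas as pd
    s = pd.Series(["a", "not_in_categories"], dtype=pd.CategoricalDtype(categories=["a"]))
    assert s.isna().tolist() == [False, True]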
def test_unify_columns_pandas_ordinal():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=True))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'ordinal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["a", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_longer():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd", "in_categories"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["0", "a"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "bcd", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_longer1():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "in_categories", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_longer2():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["0", "a", "bcd", "in_categories"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_compressed_categories():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "bcd", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
# here we're combining the "a" category and the "0" category into a single one that tracks both.
# in JSON this can be expressed as the equivalent of [["a", "0"], "bcd"]
c = {"a": 1, "0": 1, "bcd": 2}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
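# Illustrative aside (not part of the original suite): a hypothetical helper showing how a
# JSON-style category list such as [["a", "0"], "bcd"] could be expanded into the
# string-to-index mapping used above, where merged categories share one index.
def _demo_expand_compressed_categories():
    spec = [["a", "0"], "bcd"]
    mapping = {}
    for idx, entry in enumerate(spec, start=1):
        for name in (entry if isinstance(entry, list) else [entry]):
            mapping[name] = idx
    assert mapping == {"a": 1, "0": 1, "bcd": 2}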
def test_unify_feature_names_numpy1():
X = np.array([1, 2, 3])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_numpy2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_data_frame1():
X = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_data_frame2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_scipy():
X = sp.sparse.csc_matrix([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_dict1():
X = {"feature1" : [1], "feature2" : [2], "feature3" : [3]}
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_dict2():
X = {"feature2" : [1, 4], "feature1" : [2, 5], "feature3" : [3, 6]}
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 2.0)
assert(X_cols[0][1][1] == 5.0)
assert(X_cols[1][1][0] == 1.0)
assert(X_cols[1][1][1] == 4.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_list1():
X = [1, 2, 3]
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_list2():
    X = [pd.Series([1, 2, 3])]
import pandas as pd
from tarpan.shared.compare_parameters import (
save_compare_parameters, CompareParametersType)
def run_model():
data1 = {
"x": [1, 2, 3, 4, 5, 6],
"y": [-1, -2, -3, -4, -5, -6],
"z": [40, 21, 32, 41, 11, 31]
}
    df1 = pd.DataFrame(data1)
#
# Like hypergraph(); adds engine = 'pandas' | 'cudf' | 'dask' | 'dask-cudf'
#
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from .Engine import Engine, DataframeLike, DataframeLocalLike
import logging, numpy as np, pandas as pd, pyarrow as pa, sys
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# TODO: When Python 3.8+, switch to TypedDict
class HyperBindings():
def __init__(
self,
TITLE: str = 'nodeTitle',
DELIM: str = '::',
NODEID: str = 'nodeID',
ATTRIBID: str = 'attribID',
EVENTID: str = 'EventID',
EVENTTYPE: str = 'event',
SOURCE: str = 'src',
DESTINATION: str = 'dst',
CATEGORY: str = 'category',
NODETYPE: str = 'type',
EDGETYPE: str = 'edgeType',
NULLVAL: str = 'null',
SKIP: Optional[List[str]] = None,
CATEGORIES: Dict[str, List[str]] = {},
EDGES: Optional[Dict[str, List[str]]] = None
):
self.title = TITLE
self.delim = DELIM
self.node_id = NODEID
self.attrib_id = ATTRIBID
self.event_id = EVENTID
self.event_type = EVENTTYPE
self.source = SOURCE
self.destination = DESTINATION
self.category = CATEGORY
self.node_type = NODETYPE
self.edge_type = EDGETYPE
self.categories = CATEGORIES
self.edges = EDGES
self.null_val = NULLVAL
self.skip = (SKIP or []).copy()
# Prevent metadata fields from turning into nodes
bindings = vars(self)
if SKIP is None:
key : str
for key in [
#'title', 'node_id', 'attrib_id', 'event_id', 'node_type', 'edge_type'
]:
if (key in bindings) and (bindings[key] not in self.skip):
self.skip.append(bindings[key])
self.skip.sort()
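# Illustrative sketch (not part of the original module): constructing the bindings object.
# Every keyword is an existing parameter above; the column names and values are made up.
def _demo_hyper_bindings():
    defs = HyperBindings(
        DELIM='//',
        CATEGORIES={'ip': ['src_ip', 'dst_ip']},
        SKIP=['time'])
    assert defs.delim == '//'
    assert defs.categories == {'ip': ['src_ip', 'dst_ip']}
    assert defs.skip == ['time']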
def screen_entities(events: DataframeLike, entity_types: Optional[List[str]], defs: HyperBindings) -> List[str]:
"""
List entity columns: Unskipped user-specified entities when provided, else unskipped cols
"""
logger.debug('@screen_entities: skip [ %s ]', defs.skip)
base = entity_types if entity_types is not None else [x for x in events.columns]
out = [x for x in base if x not in defs.skip]
logger.debug('////screen_entities: %s', out)
return out
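# Illustrative aside (not part of the original module): screen_entities keeps the caller's
# entity list (or all columns) minus anything in defs.skip. The column names are made up.
def _demo_screen_entities():
    df = pd.DataFrame({'src_ip': [], 'dst_ip': [], 'time': []})
    defs = HyperBindings(SKIP=['time'])
    assert screen_entities(df, None, defs) == ['src_ip', 'dst_ip']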
def col2cat(cat_lookup: Dict[str, str], col: str):
return cat_lookup[col] if col in cat_lookup else col
def make_reverse_lookup(categories):
lookup = {}
for category in categories:
for col in categories[category]:
lookup[col] = str(category)
return lookup
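# Illustrative aside (not part of the original module): make_reverse_lookup flips the
# category -> columns mapping into the column -> category lookup consumed by col2cat.
# The column names here are made-up examples.
def _demo_make_reverse_lookup():
    categories = {'ip': ['src_ip', 'dst_ip'], 'user': ['login']}
    assert make_reverse_lookup(categories) == {'src_ip': 'ip', 'dst_ip': 'ip', 'login': 'user'}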
def coerce_col_safe(s, to_dtype):
if s.dtype.name == to_dtype.name:
return s
if to_dtype.name == 'int64':
return s.fillna(0).astype('int64')
if to_dtype.name == 'timedelta64[ns]':
        return s.fillna(np.timedelta64('NaT')).astype(str)
    logger.debug('COERCING %s :: %s -> %s', s.name, s.dtype, to_dtype)
return s.astype(to_dtype)
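# Illustrative aside (not part of the original module): coerce_col_safe fills missing values
# before casting so that a padded column with NaNs can still be coerced to the metadata dtype.
def _demo_coerce_col_safe():
    s = pd.Series([1.0, np.nan])
    out = coerce_col_safe(s, np.dtype('int64'))
    assert out.dtype.name == 'int64'
    assert out.tolist() == [1, 0]   # the NaN became the 0 filler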
def format_entities_from_col(
defs: HyperBindings,
cat_lookup: Dict[str, str],
drop_na: bool,
engine: Engine,
col_name: str,
df_with_col: DataframeLike,
meta: pd.DataFrame,
debug: bool
) -> DataframeLocalLike:
"""
For unique v in column col, create [{col: str(v), title: str(v), nodetype: col, nodeid: `<cat><delim><v>`}]
- respect drop_na
- respect colname overrides
- receive+return pd.DataFrame / cudf.DataFrame depending on engine
"""
logger.debug('@format_entities: [drop: %s], %s / %s', drop_na, col_name, [c for c in df_with_col])
try:
df_with_col_pre = df_with_col[col_name].dropna() if drop_na else df_with_col[col_name]
if debug and engine in [ Engine.DASK, Engine.DASK_CUDF ]:
df_with_col_pre = df_with_col_pre.persist()
logger.debug('col [ %s ] entities with dropna [ %s ]: %s', col_name, drop_na, df_with_col_pre.compute())
try:
unique_vals = df_with_col_pre.drop_duplicates()
except:
unique_vals = df_with_col_pre.astype(str).drop_duplicates()
logger.warning('Coerced col %s to string type for entity names', col_name)
unique_safe_val_strs = unique_vals.astype(str).fillna(defs.null_val)
except NotImplementedError:
        logger.warning('Dropped col %s from entity list due to errors', col_name)
unique_vals = mt_series(engine)
unique_safe_val_strs = unique_vals.astype(str).fillna(defs.null_val)
if debug and engine in [ Engine.DASK, Engine.DASK_CUDF ]:
unique_vals = unique_vals.persist()
logger.debug('unique_vals: %s', unique_vals.compute())
base_df = unique_vals.rename(col_name).to_frame()
base_df = base_df.assign(**{
defs.title: unique_safe_val_strs,
defs.node_type: col_name,
defs.category: col2cat(cat_lookup, col_name),
defs.node_id: (col2cat(cat_lookup, col_name) + defs.delim) + unique_safe_val_strs
})
if debug and engine in [ Engine.DASK, Engine.DASK_CUDF ]:
base_df = base_df.persist()
logger.debug('base_df1: %s', base_df.compute())
missing_cols = [ c for c in meta if c not in base_df ]
base_df = base_df.assign(**{
c: np.nan
for c in missing_cols
})
logger.debug('==== BASE 2 ====')
if debug and engine in [ Engine.DASK, Engine.DASK_CUDF ]:
base_df = base_df.persist()
logger.debug('base_df2: %s', base_df.compute())
logger.debug('needs conversions: %s',
[(c, base_df[c].dtype.name, meta[c].dtype.name) for c in missing_cols])
for c in base_df:
logger.debug('test base_df2 col [ %s ]: %s', c, base_df[c].dtype)
logger.debug('base_df2[ %s ]: %s', c, base_df[c].compute())
logger.debug('convert [ %s ] %s -> %s', c, base_df[c].dtype.name, meta[c].dtype.name)
logger.debug('orig: %s', base_df[c].compute())
logger.debug('was a missing col needing coercion: %s', c in missing_cols)
if c in missing_cols:
logger.debug('coerced 1: %s', coerce_col_safe(base_df[c], meta[c].dtype).compute())
logger.debug('coerced 2: %s', base_df.assign(**{c: coerce_col_safe(base_df[c], meta[c].dtype)}).compute())
base_as_meta_df = base_df.assign(**{
c: coerce_col_safe(base_df[c], meta[c].dtype) if base_df[c].dtype.name != meta[c].dtype.name else base_df[c]
for c in missing_cols
})
logger.debug('==== BASE 3 ====')
if debug and engine in [ Engine.DASK, Engine.DASK_CUDF ]:
base_as_meta_df = base_as_meta_df.persist()
for c in base_df:
logger.debug('test base_df3 col [ %s ]: %s -> %s', c, base_df[c].dtype, base_as_meta_df[c].dtype)
logger.debug('base_df3[ %s ]: %s', c, base_as_meta_df[c].compute())
return base_as_meta_df
def concat(dfs: List[DataframeLike], engine: Engine, debug=False):
if debug and len(dfs) > 1:
df0 = dfs[0]
for c in df0:
logger.debug('checking df0: %s :: %s', c, df0[c].dtype)
for df_i in dfs[1:]:
if c not in df_i:
logger.warning('missing df0[%s]::%s in df_i', c, df0[c].dtype)
if df0[c].dtype != df_i[c].dtype:
logger.warning('mismatching df0[c]::%s vs df_i[c]::%s for %s', df0[c].dtype, df_i[c].dtype, c)
for df_i in dfs[1:]:
for c in df_i:
logger.debug('checking df_i: %s', c)
if c not in df0:
logger.warning('missing df_i[%s]::%s in df0', c, df_i[c].dtype)
logger.debug('all checked!')
if engine == Engine.PANDAS:
return pd.concat(dfs, ignore_index=True, sort=False)
if engine == Engine.DASK:
import dask.dataframe
return dask.dataframe.concat(dfs).reset_index(drop=True)
if engine == Engine.CUDF:
import cudf
try:
return cudf.concat(dfs, ignore_index=True)
except TypeError as e:
            logger.warning('Failed to concat, likely due to a column type mismatch; try converting the offending column to string. Column dtypes:')
for df in dfs:
logger.warning('df types :: %s', df.dtypes)
raise e
if engine == Engine.DASK_CUDF:
import dask_cudf
return dask_cudf.concat(dfs)
raise NotImplementedError('Unknown engine')
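# Illustrative aside (not part of the original module): with the pandas engine, concat reduces
# to pd.concat with a fresh index; the debug flag only adds dtype cross-checks across frames.
def _demo_concat_pandas():
    a = pd.DataFrame({'x': [1]})
    b = pd.DataFrame({'x': [2]})
    out = concat([a, b], Engine.PANDAS)
    assert out['x'].tolist() == [1, 2]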
def get_df_cons(engine: Engine):
if engine == Engine.PANDAS:
return pd.DataFrame
if engine == Engine.DASK:
import dask.dataframe
return dask.dataframe.DataFrame
if engine == Engine.CUDF:
import cudf
return cudf.DataFrame
if engine == Engine.DASK_CUDF:
import dask_cudf
return dask_cudf.DataFrame
raise NotImplementedError('Unknown engine')
def mt_df(engine: Engine):
if engine == Engine.DASK:
import dask.dataframe
return dask.dataframe.from_pandas(pd.DataFrame(), npartitions=1)
if engine == Engine.DASK_CUDF:
import cudf, dask_cudf
return dask_cudf.from_cudf(cudf.from_pandas(pd.DataFrame()), npartitions=1)
cons = get_df_cons(engine)
return cons()
def get_series_cons(engine: Engine, dtype='int32'):
if engine == Engine.PANDAS:
return pd.Series
if engine == Engine.DASK:
import dask.dataframe
return dask.dataframe.Series
if engine == Engine.CUDF:
import cudf
return cudf.Series
if engine == Engine.DASK_CUDF:
import dask_cudf
return dask_cudf.Series
raise NotImplementedError('Unknown engine')
def series_cons(engine: Engine, arr: List, dtype='int32', npartitions=None, chunksize=None):
if engine == Engine.PANDAS:
return pd.Series(arr, dtype=dtype)
if engine == Engine.DASK:
import dask.dataframe
return dask.dataframe.from_pandas(pd.Series(arr, dtype=dtype), npartitions=npartitions, chunksize=chunksize).astype(dtype)
if engine == Engine.CUDF:
import cudf
return cudf.Series(arr, dtype=dtype)
if engine == Engine.DASK_CUDF:
import cudf, dask_cudf
gs = cudf.Series(arr, dtype=dtype)
out = dask_cudf.from_cudf(gs, npartitions=npartitions, chunksize=chunksize)
out2 = out.astype(dtype)
logger.debug('series_cons :: %s => %s => %s', gs.dtype, out.dtype, out2.dtype)
return out2
raise NotImplementedError('Unknown engine')
def mt_series(engine: Engine, dtype='int32'):
cons = get_series_cons(engine)
return cons([], dtype=dtype)
# This will be slightly wrong: pandas will turn datetime64['ms'] into datetime64['ns']
def mt_nodes(defs: HyperBindings, events: DataframeLike, entity_types: List[str], direct: bool, engine: Engine) -> pd.DataFrame:
single_engine = engine
if engine == Engine.DASK_CUDF:
single_engine = Engine.CUDF
if engine == Engine.DASK:
single_engine = Engine.PANDAS
mt_obj_s = series_cons(single_engine, [], dtype='object', npartitions=1)
out = ((events[ entity_types ] if direct else events)
.head(0)
.assign(
**{
defs.title: mt_obj_s,
defs.event_id: mt_obj_s,
defs.node_type: mt_obj_s,
defs.category: mt_obj_s,
defs.node_id: mt_obj_s,
}
))
logger.debug('mt_nodes init :: %s', out.dtypes)
return out
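# Note: the empty ('head(0)') frame above pre-assigns all node columns as object dtype;
# format_entities() below passes it to each per-column formatter, presumably so every
# sub-frame ends up with the same column set and dtypes before concatenation.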
#ex output: DataFrameLike([{'val::state': 'CA', 'nodeType': 'state', 'nodeID': 'state::CA'}])
def format_entities(
events: DataframeLike,
entity_types: List[str],
defs: HyperBindings,
direct: bool,
drop_na: bool,
engine: Engine,
npartitions: Optional[int],
chunksize: Optional[int],
debug: bool = False) -> DataframeLike:
logger.debug('@format_entities :: %s', entity_types)
logger.debug('dtypes: %s', events.dtypes)
cat_lookup = make_reverse_lookup(defs.categories)
logger.debug('@format_entities cat_lookup [ %s ] => [ %s ]', defs.categories, cat_lookup)
mt_df = mt_nodes(defs, events, entity_types, direct, engine)
logger.debug('mt_df :: %s', mt_df.dtypes)
entity_dfs = [
format_entities_from_col(
defs, cat_lookup, drop_na, engine,
col_name, events[[col_name]], mt_df,
debug)
for col_name in entity_types
]
if debug and (engine in [Engine.DASK, Engine.DASK_CUDF]):
entity_dfs = [ df.persist() for df in entity_dfs ]
for df in entity_dfs:
logger.debug('format_entities sub df dtypes: %s', df.dtypes)
if df.dtypes.to_dict() != entity_dfs[0].dtypes.to_dict():
logger.error('MISMATCHES')
d1 = df.dtypes.to_dict()
d2 = entity_dfs[0].dtypes.to_dict()
for k, v in d1.items():
if k not in d2:
logger.error('key %s (::%s) missing in df_0', k, v)
elif d2[k] != v:
logger.error('%s:%s <> %s:%s', k, v, k, d2[k])
for k, v in d2.items():
if k not in d1:
logger.error('key %s (::%s) missing in df_i', k, v)
logger.debug('entity_df: %s', df.compute())
df = concat(entity_dfs, engine, debug).drop_duplicates([defs.node_id])
if debug and (engine in [Engine.DASK, Engine.DASK_CUDF]):
df = df.persist()
df.compute()
logger.debug('////format_entities')
return df
#ex output: DataFrame([{'edgeType': 'state', 'attribID': 'state::CA', 'eventID': 'eventID::0'}])
def format_hyperedges(
engine: Engine, events: DataframeLike, entity_types: List[str], defs: HyperBindings,
drop_na: bool, drop_edge_attrs: bool, debug: bool = False
) -> DataframeLike:
is_using_categories = len(defs.categories.keys()) > 0
cat_lookup = make_reverse_lookup(defs.categories)
# mt_pdf = pd.DataFrame({
# **{
# **({defs.category: pd.Series([], dtype='object')} if is_using_categories else {}),
# defs.edge_type: pd.Series([], dtype='object'),
# defs.attrib_id: pd.Series([], dtype='object'),
# defs.event_id: pd.Series([], dtype='object'),
# defs.category: pd.Series([], dtype='object'),
# defs.node_id: pd.Series([], dtype='object'),
# },
# **({
# x: pd.Series([], dtype=events[x].dtype)
# for x in entity_types
# } if drop_edge_attrs else {
# x: pd.Series([], dtype=events[x].dtype)
# for x in events.columns
# })
# })
subframes = []
for col in sorted(entity_types):
fields = list(set([defs.event_id] + ([x for x in events.columns] if not drop_edge_attrs else [ col ])))
raw = events[ fields ]
if drop_na:
            logger.debug('dropping na [ %s ] from available [ %s ] (fields: [ %s ])', col, raw.columns, fields)
raw = raw.dropna(subset=[col])
raw = raw.copy()
if is_using_categories:
raw[defs.edge_type] = col2cat(cat_lookup, col)
raw[defs.category] = col
else:
raw[defs.edge_type] = col
try:
raw[defs.attrib_id] = (col2cat(cat_lookup, col) + defs.delim) + raw[col].astype(str).fillna(defs.null_val)
except NotImplementedError:
logger.warning('Did not create hyperedges for column %s as does not support astype(str)', col)
continue
if drop_edge_attrs:
logger.debug('dropping val col [ %s ] from [ %s ]', col, raw.columns)
raw = raw.drop(columns=[col])
logger.debug('dropped => [ %s ]', raw.columns)
if debug and (engine in [Engine.DASK, Engine.DASK_CUDF]):
raw = raw.persist()
raw.compute()
subframes.append(raw)
if len(subframes):
result_cols = list(set(
([x for x in events.columns.tolist() if not x == defs.node_type]
if not drop_edge_attrs
else [])
+ [defs.edge_type, defs.attrib_id, defs.event_id] # noqa: W503
+ ([defs.category] if is_using_categories else []) )) # noqa: W503
if debug and (engine in [Engine.DASK, Engine.DASK_CUDF]):
#subframes = [df.persist() for df in subframes]
for df in subframes:
logger.debug('edge sub: %s', df.dtypes)
out = concat(subframes, engine, debug).reset_index(drop=True)[ result_cols ]
if debug and (engine in [Engine.DASK, Engine.DASK_CUDF]):
out = out.persist()
out.compute()
logger.debug('////format_hyperedges')
return out
else:
return mt_series(engine)
def direct_edgelist_shape(entity_types: List[str], defs: HyperBindings) -> Dict[str, List[str]]:
"""
Edges take format {src_col: [dest_col1, dest_col2], ....}
If None, create connect all to all, leaving up to algorithm in which direction
"""
if defs.edges is not None:
return defs.edges
else:
out = {}
for entity_i in range(len(entity_types)):
out[ entity_types[entity_i] ] = entity_types[(entity_i + 1):]
return out
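# Example of the all-to-all default (hypothetical entity list, defs.edges left as None):
#   direct_edgelist_shape(['a', 'b', 'c'], defs) => {'a': ['b', 'c'], 'b': ['c'], 'c': []}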
#ex output: like format_hyperedges above, but each row carries source/destination node IDs (e.g. 'state::CA') instead of attribID
def format_direct_edges(
engine: Engine, events: DataframeLike, entity_types, defs: HyperBindings, edge_shape, drop_na: bool, drop_edge_attrs: bool,
debug: bool = False
) -> DataframeLike:
is_using_categories = len(defs.categories.keys()) > 0
cat_lookup = make_reverse_lookup(defs.categories)
subframes = []
for col1 in sorted(edge_shape.keys()):
for col2 in sorted(edge_shape[col1]):
fields = list(set([defs.event_id] + ([x for x in events.columns] if not drop_edge_attrs else [col1, col2])))
raw = events[ fields ]
if drop_na:
raw = raw.dropna(subset=[col1, col2])
raw = raw.copy()
if is_using_categories:
raw[defs.edge_type] = col2cat(cat_lookup, col1) + defs.delim + col2cat(cat_lookup, col2)
raw[defs.category] = col1 + defs.delim + col2
else:
raw[defs.edge_type] = col1 + defs.delim + col2
raw[defs.source] = (col2cat(cat_lookup, col1) + defs.delim) + raw[col1].astype(str).fillna(defs.null_val)
raw[defs.destination] = (col2cat(cat_lookup, col2) + defs.delim) + raw[col2].astype(str).fillna(defs.null_val)
if drop_edge_attrs:
raw = raw.drop(columns=[col1, col2])
if debug and (engine in [Engine.DASK, Engine.DASK_CUDF]):
raw = raw.persist()
raw.compute()
subframes.append(raw)
if len(subframes):
result_cols = list(set(
([x for x in events.columns.tolist() if not x == defs.node_type]
if not drop_edge_attrs
else [])
+ [defs.edge_type, defs.source, defs.destination, defs.event_id] # noqa: W503
+ ([defs.category] if is_using_categories else []) )) # noqa: W503
if debug and (engine in [Engine.DASK, Engine.DASK_CUDF]):
# subframes = [ df.persist() for df in subframes ]
for df in subframes:
logger.debug('format_direct_edges subdf: %s', df.dtypes)
out = concat(subframes, engine=engine, debug=debug)[ result_cols ]
if debug and (engine in [Engine.DASK, Engine.DASK_CUDF]):
out = out.persist()
out.compute()
logger.debug('////format_direct_edges')
return out
else:
return events[:0][[]]
def format_hypernodes(events, defs, drop_na):
event_nodes = events.copy()
event_nodes[defs.node_type] = defs.event_id
event_nodes[defs.category] = defs.event_type
event_nodes[defs.node_id] = event_nodes[defs.event_id]
event_nodes[defs.title] = event_nodes[defs.event_id]
return event_nodes
def hyperbinding(g, defs, entities, event_entities, edges, source, destination):
nodes = | pd.concat([entities, event_entities], ignore_index=True, sort=False) | pandas.concat |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mysql_url() -> str:
conn = os.environ["MYSQL_URL"]
return conn
def test_mysql_without_partition(mysql_url: str) -> None:
query = "select * from test_table limit 3"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 3], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_with_partition(mysql_url: str) -> None:
query = "select * from test_table"
df = read_sql(
mysql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 6], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3, 4.4, 5.5, 6.6], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_types(mysql_url: str) -> None:
query = "select * from test_types"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"),
"test_time": | pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object") | pandas.Series |
# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Matrix games with PDHG."""
# Usage: pdhg_matrix_games.py [output directory]
import collections
import os
import sys
from . import restarted_pdhg
import cvxpy as cp
import numpy as np
import odl
import pandas as pd
np.random.seed(12)
OUTPUT_DIR = sys.argv[1]
def uniform_game(num_rows, num_cols):
return 0.5 * np.random.random_sample((num_rows, num_cols)) - 1
def normal_game(num_rows, num_cols):
return np.random.normal(size=(num_rows, num_cols))
class IndicatorSimplexConjugate(odl.solvers.functional.Functional):
"""Implements the convex conjugate of the indicator of the simplex."""
def __init__(self, space, diameter=1, sum_rtol=None):
super(IndicatorSimplexConjugate, self).__init__(
space=space, linear=False, grad_lipschitz=np.nan)
# Confuses the primal and the dual space. Luckily they're the same.
self.diameter = diameter
self.sum_rtol = sum_rtol
@property
def convex_conj(self):
"""The convex conjugate."""
return odl.solvers.IndicatorSimplex(self.domain, self.diameter,
self.sum_rtol)
class CallbackStore(odl.solvers.Callback):
def __init__(self, payoff_matrix):
self.payoff_matrix = payoff_matrix
self.residuals_at_current = []
self.residuals_at_avg = []
def __call__(self, x, y, x_avg, y_avg, did_restart):
self.residuals_at_current.append(residual(self.payoff_matrix, x, y))
self.residuals_at_avg.append(residual(self.payoff_matrix, x_avg, y_avg))
def residual(payoff_matrix, primal, dual):
return np.amax(payoff_matrix @ primal) - np.amin(payoff_matrix.T @ dual)
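# residual() above is the duality gap of the matrix game: max(A @ x) upper-bounds the game
# value and min(A.T @ y) lower-bounds it, so the difference is nonnegative and reaches zero
# exactly at an equilibrium; the callbacks below record it as the convergence metric.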
def solve_lp(payoff_matrix):
# This is a helper function to compute the ground truth solution of the matrix
# game. It's not used in the results presented.
x = cp.Variable(payoff_matrix.shape[1])
# The activity variables are created explicitly so we can access the duals.
activity = cp.Variable(payoff_matrix.shape[0])
prob = cp.Problem(
cp.Minimize(cp.max(activity)),
[activity == payoff_matrix @ x, cp.sum(x) == 1, x >= 0])
prob.solve(solver=cp.CVXOPT)
return prob.value, x.value, -prob.constraints[0].dual_value
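# solve_lp() casts the game as the standard LP min_x max_i (A @ x)_i over the simplex; the
# negated duals of the `activity` equality constraint recover the opposing (row) player's
# mixed strategy.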
def solve_game(payoff_matrix,
num_iters,
tau,
sigma,
restart,
fixed_restart_frequency=None):
linear_operator = odl.MatrixOperator(payoff_matrix)
primal_space = linear_operator.domain
dual_space = linear_operator.range
indicator_primal_simplex = odl.solvers.IndicatorSimplex(primal_space)
conjugate_of_indicator_dual_simplex = IndicatorSimplexConjugate(dual_space)
x = primal_space.zero()
y = dual_space.zero()
callback = CallbackStore(payoff_matrix)
restarted_pdhg.restarted_pdhg(
x,
f=indicator_primal_simplex,
g=conjugate_of_indicator_dual_simplex,
L=linear_operator,
niter=num_iters,
tau=tau,
sigma=sigma,
y=y,
callback=callback,
restart=restart,
fixed_restart_frequency=fixed_restart_frequency)
return callback.residuals_at_current, callback.residuals_at_avg
NUM_ITERS = 20000
NUM_REPS = 50
def generate_results(game_generator):
iteration_log = collections.defaultdict(list)
for i in range(NUM_REPS):
print(i, 'of', NUM_REPS)
payoff_matrix = game_generator()
# odl's operator norm estimates are significantly off for normal_game. The
# matrices are small enough that we can compute the exact value instead.
tau = sigma = np.sqrt(0.9) / np.linalg.norm(payoff_matrix, ord=2)
iteration_log['pdhg'].append(
solve_game(payoff_matrix, NUM_ITERS, tau, sigma, restart='none'))
iteration_log['pdhg adaptive'].append(
solve_game(payoff_matrix, NUM_ITERS, tau, sigma, restart='adaptive'))
for restart_frequency in [8, 32, 128, 512, 2048]:
residuals_at_current, residuals_at_avg = solve_game(
payoff_matrix,
NUM_ITERS,
tau,
sigma,
restart='fixed',
fixed_restart_frequency=restart_frequency)
iteration_log['pdhg restart {}'.format(restart_frequency)].append(
(residuals_at_current, residuals_at_avg))
return iteration_log
PERCENTILE = 90
def write_to_csv(iteration_log, file_name):
data = {
'iteration_num': [],
'method': [],
'median_residual_at_current': [],
'lower_range_at_current': [],
'upper_range_at_current': [],
'median_residual_at_avg': [],
'lower_range_at_avg': [],
'upper_range_at_avg': []
}
for method in iteration_log:
for it in range(NUM_ITERS):
data['iteration_num'].append(it)
data['method'].append(method)
residuals_this_iter_at_current = [
log[it] for log in iteration_log[method][0]
]
data['median_residual_at_current'].append(
np.percentile(residuals_this_iter_at_current, 50))
data['lower_range_at_current'].append(
np.percentile(residuals_this_iter_at_current, 100 - PERCENTILE))
data['upper_range_at_current'].append(
np.percentile(residuals_this_iter_at_current, PERCENTILE))
residuals_this_iter_at_avg = [log[it] for log in iteration_log[method][1]]
data['median_residual_at_avg'].append(
np.percentile(residuals_this_iter_at_avg, 50))
data['lower_range_at_avg'].append(
np.percentile(residuals_this_iter_at_avg, 100 - PERCENTILE))
data['upper_range_at_avg'].append(
np.percentile(residuals_this_iter_at_avg, PERCENTILE))
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 7 16:36:59 2021
@author: LaoHu
"""
from docx import Document
import pandas as pd
document = Document("test.docx")
tables = []
for table in document.tables:
df = [["" for i in range(len(table.columns))] for j in range(len(table.rows))]
for i, row in enumerate(table.rows):
for j, cell in enumerate(row.cells):
if cell.text:
df[i][j] = cell.text
tables.append( | pd.DataFrame(df) | pandas.DataFrame |
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
( | frequencies.get_freq('W-MON') | pandas.tseries.frequencies.get_freq |
#!/usr/bin/env python3
# coding: utf-8
import argparse
import csv
import io
import logging
import numpy as np
import os
import pandas as pd
import pkg_resources
import sys
import yaml
from .version import __version__
logger = logging.getLogger('root')
provided_converters = [
'mq2pin',
'mq2pcq',
'mq2psea',
'mq2elutator_trainer',
'mq2tmtc'
]
def write_df_to_file(df, headers, out_path):
with open(out_path, 'w') as f:
f.write(headers)
logger.info('Writing output to {} ...'.format(out_path))
df.to_csv(out_path, sep=output_sep, header=False,
index=write_row_names, mode='a', quoting=quoting)
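# Note: output_sep, write_row_names, and quoting (and input_sep used below) are not defined
# in this module; they are expected to be supplied by the converter config file, which
# convert_files() exec's into globals().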
def convert_files(config_file_name=None, input_list=None, input_files=None, output=None):
if config_file_name is None:
raise Exception('No configuration file (existing name or file path) provided.')
# load vars from the config file
if isinstance(config_file_name, io.BufferedReader):
config_file = config_file_name.read()
config_file_name = 'buffer'
elif config_file_name in provided_converters:
config_file = pkg_resources.resource_string('ezconvert', '/'.join(('converters', config_file_name + '.py')))
else:
logger.info('Loading config file functions from {}.'.format(config_file_name))
with open(config_file_name, 'rb') as f:
config_file = f.read()
exec(compile(config_file, config_file_name, 'exec'), globals())
# read inputs, either from the input list or from the command line
_input = []
if input_list is not None:
logger.info('Reading in input files from input list {}.'.format(input_list.name))
with open(input_list.name, 'r') as f:
_input = yaml.load(f)
else:
logger.info('Reading in input files from command line.')
_input = [f.name for f in input_files]
if len(_input) == 0:
raise Exception('No input files provided, either from the input list or the command line.')
df = pd.DataFrame()
# iterate through each input file provided.
for i, f in enumerate(_input):
# first expand user or any vars
f = os.path.expanduser(f)
f = os.path.expandvars(f)
logger.info('Reading in input file #{} | {} ...'.format(i+1, f))
dfa = | pd.read_csv(f, sep=input_sep, low_memory=False) | pandas.read_csv |
import numpy as np
import pandas as pd
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
def one_way_anova(data, target, between, summary=None):
formula = "Q('%s') ~ " % target
formula += "C(Q('%s'))" % between
model = ols(formula, data=data).fit()
result = anova_lm(model)
result = result.rename(columns={
'sum_sq' : 'Sum Square',
'mean_sq' : 'Mean Square',
'F' : 'F Statistic',
'PR(>F)' : 'p-value'
})
result = result.rename(index={
"C(Q('%s'))" % between : between
})
result2 = pd.DataFrame(
{
"Count": data.groupby(between)[target].count(),
"Mean": data.groupby(between)[target].mean(),
"Median": data.groupby(between)[target].median(),
"Std.": data.groupby(between)[target].std(),
"Variance": data.groupby(between)[target].var()
}
)
result2.index.name = None
index_change = {}
for index in result2.index:
changed = "{}({})".format(between, index)
index_change[index] = changed
result2 = result2.rename(index_change)
if summary:
return result2
else:
return result
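# Minimal usage sketch (hypothetical data, not part of the original module):
#   df = pd.DataFrame({'group': ['a', 'a', 'b', 'b'], 'score': [1.0, 2.0, 3.0, 4.0]})
#   one_way_anova(df, 'score', 'group')                # ANOVA table
#   one_way_anova(df, 'score', 'group', summary=True)  # per-group descriptive summary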
def two_way_anova(data, target, between, summary=None):
formula = "Q('%s') ~ " % target
formula += "C(Q('%s'), Sum) * " % between[0]
formula += "C(Q('%s'), Sum)" % between[1]
model = ols(formula, data=data).fit()
result = anova_lm(model)
result = result.rename(columns={
'sum_sq' : 'Sum Square',
'mean_sq' : 'Mean Square',
'F' : 'F Statistic',
'PR(>F)' : 'p-value'
})
index_change = {}
for index in result.index:
changed = index
for var in between:
changed = changed.replace("C(Q('%s'), Sum)" % var, var)
changed = changed.replace(":", " : ")
index_change[index] = changed
result = result.rename(index_change)
result2 = | pd.DataFrame(columns=["Count", "Mean", "Median", "Std.", "Variance"]) | pandas.DataFrame |
"""Globwat diagnostic."""
import logging
from pathlib import Path
import numpy as np
import xarray as xr
import pandas as pd
import dask.array as da
import iris
from esmvalcore.preprocessor import regrid
from esmvaltool.diag_scripts.hydrology.derive_evspsblpot import debruin_pet
from esmvaltool.diag_scripts.hydrology.compute_chunks import compute_chunks
from esmvaltool.diag_scripts.shared import (ProvenanceLogger,
get_diagnostic_filename,
group_metadata,
run_diagnostic)
logger = logging.getLogger(Path(__file__).name)
def create_provenance_record():
"""Create a provenance record."""
record = {
'caption': "Forcings for the GlobWat hydrological model.",
'domains': ['global'],
'authors': [
'abdollahi_banafsheh',
'alidoost_sarah',
],
'projects': [
'ewatercycle',
],
'references': [
'acknow_project',
'debruin16ams',
'hoogeveen15hess',
'langbein1949usgs',
],
'ancestors': [],
}
return record
def rechunk_and_regrid(src, tgt, scheme):
"""Rechunk cube src and regrid it onto the grid of cube tgt."""
src_chunks = compute_chunks(src, tgt)
src.data = src.lazy_data().rechunk(src_chunks)
return regrid(src, tgt, scheme)
def change_data_type(cube):
"""Change data type to float32."""
cube.data = cube.core_data().astype('float32')
for coord_name in 'latitude', 'longitude', 'time':
coord = cube.coord(coord_name)
coord.points = coord.core_points().astype('float32')
coord.bounds = None
coord.guess_bounds()
return cube
def _convert_units(cube):
"""Convert unit of cube, used only for water variables.
From kg m-2 s-1 to kg m-2 month-1 or kg m-2 day-1.
Note that the unit kg m-2 s-1 is equivalent to mm s-1.
"""
mip = cube.attributes['mip']
if mip == 'Amon':
cube.convert_units('kg m-2 month-1') # equivalent to mm/month
elif mip == 'day':
cube.convert_units('kg m-2 day-1') # equivalent to mm/day
return cube
def _fix_negative_values(cube):
"""Change negative values to zero."""
cube.data = da.where(cube.core_data() < 0, 0, cube.core_data())
return cube
def get_input_cubes(metadata):
"""Return a dictionary with all (preprocessed) input files."""
provenance = create_provenance_record()
all_vars = {}
for attributes in metadata:
short_name = attributes['short_name']
filename = attributes['filename']
logger.info("Loading variable %s", short_name)
cube = iris.load_cube(filename)
all_vars[short_name] = change_data_type(cube)
cube.attributes['mip'] = attributes['mip']
provenance['ancestors'].append(filename)
return all_vars, provenance
def load_target(cfg):
"""Load target grid."""
filename = Path(cfg['auxiliary_data_dir']) / cfg['target_grid_file']
cube = iris.load_cube(str(filename))
for coord in 'longitude', 'latitude':
if not cube.coord(coord).has_bounds():
cube.coord(coord).guess_bounds()
return cube
def langbein_pet(tas):
"""Calculate potential ET using Langbein method.
    The Langbein curve is an empirical relationship between temperature and
    potential ET (pet) of the form pet = a + b * T + c * T**2, where T is the
    annual average temperature in degrees Celsius, pet is in mm per year, and
    a, b and c are unitless empirical constants.
Reference: https://doi.org/10.3133/cir52 page 8, figure 1.
An example of using Langbein method can be found at:
https://doi.org/10.1080/02626667.2017.1332416 page 1472, equation 7.
"""
tas.convert_units('degC')
constant_a = iris.coords.AuxCoord(np.float32(325),
long_name='first constant', units=None)
constant_b = iris.coords.AuxCoord(np.float32(21),
long_name='second constant', units=None)
constant_c = iris.coords.AuxCoord(np.float32(0.9),
long_name='third constant', units=None)
    # Assumption here: tas is constant over time, so the monthly/daily
    # average value equals the annual average.
pet = (tas) * constant_b + (tas ** 2) * constant_c + constant_a
pet.units = 'kg m-2 year-1' # equivalent to mm year-1
pet.convert_units('kg m-2 s-1') # convert to a cmor compatible unit
pet.var_name = 'evspsblpot'
pet.standard_name = 'water_potential_evaporation_flux'
pet.long_name = 'Potential Evapotranspiration'
return pet
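# Worked example of the curve above (not from the original source): at an annual mean
# temperature of 10 degC, pet = 325 + 21 * 10 + 0.9 * 10 ** 2 = 625 mm per year, before
# the final conversion to kg m-2 s-1.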
def get_cube_time_info(cube):
"""Return year, month and day from the cube."""
coord_time = cube.coord('time')
time = coord_time.cell(0).point
time_step = time.strftime("%Y%m%d")
return time_step
def get_cube_data_info(cube):
"""Return short_name, and mip from the cube."""
short_name = cube.var_name
mip = cube.attributes['mip']
return short_name, mip
def _swap_western_hemisphere(cube):
"""Set longitude values in range -180, 180.
Western hemisphere longitudes should be negative.
"""
array = xr.DataArray.from_iris(cube)
# Set longitude values in range -180, 180.
array['lon'] = (array['lon'] + 180) % 360 - 180
# Re-index data along longitude values
west = array.where(array.lon < 0, drop=True)
east = array.where(array.lon >= 0, drop=True)
return west.combine_first(east)
def _flip_latitudes(array):
"""Flip latitudes for writing as ascii.
    Latitude values should be ordered from 90 to -90.
"""
flipped = array[::-1, ...]
flipped['lat'] = array['lat'] * -1
return flipped
def save_to_ascii(cube, file_name):
"""Save data to an ascii file.
Data with index [0,0] should be in -180, 90 lon/lat.
"""
# Re-index data
array = _swap_western_hemisphere(cube)
array = _flip_latitudes(array)
# Set nodata values
array = array.fillna(-9999)
xmin = array['lon'].min().values
ymin = array['lat'].min().values
xres = array['lon'].values[1] - array['lon'].values[0]
output = open(file_name, "w")
output.write(f"ncols {array.shape[1]}\n")
output.write(f"nrows {array.shape[0]}\n")
output.write(f"xllcorner {xmin}\n")
output.write(f"yllcorner {ymin}\n")
output.write(f"cellsize {xres}\n")
output.write(f"NODATA_value {np.int32(-9999)}\n")
output.close()
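    # The six header lines above follow the ESRI ASCII raster (.asc) convention; the grid
    # values are expected to be appended after this header.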
data_frame = | pd.DataFrame(array.values, dtype=array.dtype) | pandas.DataFrame |
import calendar
import pickle as pkl
import pandas as pd
import numpy as np
import random
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score
seed = 42
random.seed(seed)
np.random.seed(seed)
# Load the data file and open the pickle files for writing.
df = pd.read_csv('./kickstarter.csv')
file1 = open("model.pkl", "wb")
file2 = open("encoder.pkl", "wb")
file3 = open("scaler.pkl", "wb")
#convert months and days to a numeric equivalent.
months = list(calendar.month_name)
days = list(calendar.day_name)
df['month'] = df['month'].map(lambda x: months.index(x))
df['day'] = df['day'].map(lambda x: days.index(x))
# Declare a one-hot encoder to turn the categorical fields into binary indicator columns.
encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
encoder.fit(df[['category', 'subcategory', 'month', 'day', 'hour', 'state']])
# Create the min-max scaler and apply it to our parameters. Drop all unneeded columns and store the column to be predicted as our y.
X = df.drop(columns=['Unnamed: 0', 'id', 'title', 'category', 'subcategory', 'blurb', 'launch', 'deadline', 'state', 'city', 'backers', 'pledged', 'ongoing', 'location', 'success'])
columns = X.columns
X = | pd.DataFrame(X, columns=columns) | pandas.DataFrame |
import numpy as np
import pandas as pd
from bach import Series, DataFrame
from bach.operations.cut import CutOperation, QCutOperation
from sql_models.util import quote_identifier
from tests.functional.bach.test_data_and_utils import assert_equals_data
PD_TESTING_SETTINGS = {
'check_dtype': False,
'check_exact': False,
'atol': 1e-3,
}
def compare_boundaries(expected: pd.Series, result: Series) -> None:
for exp, res in zip(expected.to_numpy(), result.to_numpy()):
if not isinstance(exp, pd.Interval):
assert res is None or np.isnan(res)
continue
np.testing.assert_almost_equal(exp.left, float(res.left), decimal=2)
np.testing.assert_almost_equal(exp.right, float(res.right), decimal=2)
if exp.closed_left:
assert res.closed_left
if exp.closed_right:
assert res.closed_right
def test_cut_operation_pandas(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=10)
result = CutOperation(series=series, bins=10)()
compare_boundaries(expected, result)
expected_wo_right = pd.cut(p_series, bins=10, right=False)
result_wo_right = CutOperation(series, bins=10, right=False)()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_bach(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
ranges = [
pd.Interval(0, 9.9, closed='both'),
pd.Interval(9.9, 19.8, closed='right'),
pd.Interval(19.8, 29.7, closed='right'),
pd.Interval(29.7, 39.6, closed='right'),
pd.Interval(39.6, 49.5, closed='right'),
pd.Interval(49.5, 59.4, closed='right'),
pd.Interval(59.4, 69.3, closed='right'),
pd.Interval(69.3, 79.2, closed='right'),
pd.Interval(79.2, 89.1, closed='right'),
pd.Interval(89.1, 99, closed='right'),
]
expected = pd.Series({num: ranges[int(num / 10)] for num in range(100)})
result = CutOperation(series=series, bins=10, method='bach')().sort_index()
compare_boundaries(expected, result)
ranges_wo_right = [
pd.Interval(0, 9.9, closed='left'),
pd.Interval(9.9, 19.8, closed='left'),
pd.Interval(19.8, 29.7, closed='left'),
pd.Interval(29.7, 39.6, closed='left'),
pd.Interval(39.6, 49.5, closed='left'),
pd.Interval(49.5, 59.4, closed='left'),
pd.Interval(59.4, 69.3, closed='left'),
pd.Interval(69.3, 79.2, closed='left'),
pd.Interval(79.2, 89.1, closed='left'),
pd.Interval(89.1, 99, closed='both'),
]
expected_wo_right = pd.Series({num: ranges_wo_right[int(num / 10)] for num in range(100)})
result_wo_right = CutOperation(series=series, bins=10, method='bach', right=False)().sort_index()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_boundary(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 2, 3, 4], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=bins, right=True)
result = CutOperation(series=series, bins=bins, right=True)()
compare_boundaries(expected, result)
def test_cut_w_ignore_index(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 2, 3, 4], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
result = CutOperation(series=series, bins=bins, right=True, ignore_index=False)()
assert ['_index_0', 'a'] == list(result.index.keys())
result_w_ignore = CutOperation(series=series, bins=bins, right=True, ignore_index=True)()
assert ['a'] == list(result_w_ignore.index.keys())
def test_cut_w_include_empty_bins(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 1, 2, 3, 6, 7, 8], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
result = CutOperation(
series=series, bins=bins, include_empty_bins=True,
)().sort_index()
empty_interval = pd.Interval(3.333, 5.667)
expected_data = [
pd.Interval(0.993, 3.333),
pd.Interval(0.993, 3.333),
pd.Interval(0.993, 3.333),
pd.Interval(0.993, 3.333),
pd.Interval(5.667, 8),
pd.Interval(5.667, 8),
pd.Interval(5.667, 8),
empty_interval,
]
expected_index = [1., 1., 2., 3., 6., 7., 8., np.nan]
expected = pd.Series(data=expected_data, index=expected_index)
compare_boundaries(expected, result)
def test_cut_operation_calculate_bucket_properties(engine) -> None:
final_properties = ['a_min', 'a_max', 'bin_adjustment', 'step']
bins = 2
# min != max
p_series_neq = pd.Series(data=[1, 3, 5, 16, 2, 20], name='a')
series_neq = DataFrame.from_pandas(engine=engine, df=p_series_neq.to_frame(), convert_objects=True).a
result_neq = CutOperation(series=series_neq, bins=bins)._calculate_bucket_properties()
expected_neq = pd.DataFrame(
data={
'a_min': [1], # min(a) - min_adjustment
'a_max': [20], # max(a) + max_adjustment
'min_adjustment': [0], # min(a) != max(a)
'max_adjustment': [0], # min(a) != max(a)
'bin_adjustment': [0.019], # (max(a) - min(a)) * range_adjustment
'step': [9.5], # (max(a) - min(a)) / bins
},
)
pd.testing.assert_frame_equal(expected_neq[final_properties], result_neq.to_pandas(), check_dtype=False)
# min == max
p_series_eq = pd.Series(data=[2, 2], name='a')
series_eq = DataFrame.from_pandas(engine=engine, df=p_series_eq.to_frame(), convert_objects=True).a
result_eq = CutOperation(series=series_eq, bins=bins)._calculate_bucket_properties()
expected_eq = pd.DataFrame(
data={
'a_min': [1.998],
'a_max': [2.002],
'min_adjustment': [0.002], # if min(a) == max(a): range_adjustment * abs(min(a))
'max_adjustment': [0.002], # if min(a) == max(a): range_adjustment * abs(max(a))
'bin_adjustment': [0.],
'step': [0.002],
},
)
pd.testing.assert_frame_equal(expected_eq[final_properties], result_eq.to_pandas(), **PD_TESTING_SETTINGS)
# min == max == 0
p_series_zero = pd.Series(data=[0, 0, 0, 0], name='a')
series_zero = DataFrame.from_pandas(engine=engine, df=p_series_zero.to_frame(), convert_objects=True).a
result_zero = CutOperation(series=series_zero, bins=bins)._calculate_bucket_properties()
expected_zero = pd.DataFrame(
data={
'a_min': [-0.001],
'a_max': [0.001],
'min_adjustment': [0.001], # if min(a) == max(a) == 0: range_adjustment
'max_adjustment': [0.001], # if min(a) == max(a) == 0: range_adjustment
'bin_adjustment': [0.],
'step': [0.001],
},
)
pd.testing.assert_frame_equal(expected_zero[final_properties], result_zero.to_pandas(), **PD_TESTING_SETTINGS)
def test_cut_calculate_pandas_adjustments(engine) -> None:
pdf = pd.DataFrame(data={'min': [1], 'max': [100]})
df = DataFrame.from_pandas(engine=engine, df=pdf, convert_objects=True)
to_adjust = df['min']
to_compare = df['max']
result = CutOperation(series=df['min'], bins=1)._calculate_pandas_adjustments(to_adjust, to_compare)
assert isinstance(result, Series)
result_case_sql = result.expression.to_sql(df.engine.dialect)
max_identifier = quote_identifier(engine, 'max')
    min_identifier = quote_identifier(engine, 'min')
    expected_case_sql = (
        f'case when {max_identifier} = {min_identifier} then\n'
        f'case when {min_identifier} != 0 then 0.001 * abs({min_identifier}) else 0.001 end\n'
'else 0 end'
)
assert expected_case_sql == result_case_sql
def test_cut_calculate_bucket_ranges(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 1, 2, 3, 4, 5, 6, 7, 8], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
cut_operation = CutOperation(series=series, bins=bins)
bucket_properties_df = cut_operation._calculate_bucket_properties()
result = cut_operation._calculate_bucket_ranges(bucket_properties_df)
assert_equals_data(
result,
order_by=['lower_bound'],
expected_columns=['bucket', 'lower_bound', 'upper_bound', 'bounds'],
expected_data=[
[1, 0.993, 3.333, '(]'],
[2, 3.333, 5.667, '(]'],
[3, 5.667, 8, '(]'],
],
round_decimals=True,
decimal=3,
)
def test_qcut_operation(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected_w_list = pd.qcut(p_series, q=[0.25, 0.3, 0.7, 0.9])
result_w_list = QCutOperation(series=series, q=[0.25, 0.3, 0.7, 0.9])()
compare_boundaries(expected_w_list, result_w_list)
expected_q_num = pd.qcut(p_series, q=4)
result_q_num = QCutOperation(series=series, q=4)()
compare_boundaries(expected_q_num, result_q_num)
def test_qcut_operation_one_quantile(engine) -> None:
p_series = pd.Series(range(10), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.qcut(p_series, q=0)
result = QCutOperation(series=series, q=0)()
compare_boundaries(expected, result)
expected2 = pd.qcut(p_series, q=[0.5])
result2 = QCutOperation(series=series, q=[0.5])()
compare_boundaries(expected2, result2)
def test_get_quantile_ranges(engine) -> None:
p_series = pd.Series(data=[1, 1, 2, 3, 4, 5, 6, 7, 8], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
qcut_operation = QCutOperation(series=series, q=[0.25, 0.5])
result = qcut_operation._get_quantile_ranges()
assert_equals_data(
result,
order_by=['lower_bound'],
expected_columns=['lower_bound', 'upper_bound', 'bounds'],
expected_data=[
[1.999, 4., '(]'],
[4., None, '(]'],
],
round_decimals=True,
decimal=3,
)
def test_qcut_w_duplicated_quantiles(engine) -> None:
p_series = pd.Series(data=[0, 1, 2, 2, 2, 2, 2], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = | pd.qcut(p_series, q=[0.25, 0.5, 0.75], duplicates='drop') | pandas.qcut |
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import warnings
warnings.filterwarnings('ignore')
# Importing the training set
df_raw = pd.read_csv('Google_Stock_Price_Train.csv')
df = df_raw
df.columns
df = df_raw.drop(['Open', 'High', 'Low', 'Volume'], axis=1)
df['Date'] = | pd.to_datetime(df['Date'], infer_datetime_format=True) | pandas.to_datetime |
import re
from datetime import datetime, timedelta
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container)
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container)
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem(): self.container[0] = 5
self.check_mutable_error(setitem)
def setslice(): self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem(): del self.container[0]
self.check_mutable_error(delitem)
def delslice(): del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert_isinstance(result, klass)
self.assertEqual(result, expected)
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert_isinstance(self.container.view(), FrozenNDArray)
self.assertFalse(isinstance(self.container.view(np.ndarray), FrozenNDArray))
self.assertIsNot(self.container.view(), self.container)
self.assert_numpy_array_equal(self.container, original)
# shallow copy should be the same too
assert_isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container): container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
self.assert_numpy_array_equal(original, vals)
self.assertIsNot(original, vals)
vals[0] = n
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
class Ops(tm.TestCase):
def setUp(self):
self.int_index = tm.makeIntIndex(10)
self.float_index = tm.makeFloatIndex(10)
self.dt_index = tm.makeDateIndex(10)
self.dt_tz_index = tm.makeDateIndex(10).tz_localize(tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10)
self.string_index = tm.makeStringIndex(10)
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index)
self.float_series = Series(arr, index=self.int_index)
self.dt_series = Series(arr, index=self.dt_index)
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index)
self.string_series = Series(arr, index=self.string_index)
types = ['int','float','dt', 'dt_tz', 'period','string']
self.objs = [ getattr(self,"{0}_{1}".format(t,f)) for t in types for f in ['index','series'] ]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index,op),index=o.index)
else:
expected = getattr(o,op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o,op)
                # these could be series, arrays or scalars
if isinstance(result,Series) and isinstance(expected,Series):
tm.assert_series_equal(result,expected)
elif isinstance(result,Index) and isinstance(expected,Index):
tm.assert_index_equal(result,expected)
elif isinstance(result,np.ndarray) and isinstance(expected,np.ndarray):
self.assert_numpy_array_equal(result,expected)
else:
self.assertEqual(result, expected)
        # freq raises AttributeError on an Int64Index because it's not defined
        # we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError, otherwise
# an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
self.assertRaises(TypeError, lambda : getattr(o,op))
else:
self.assertRaises(AttributeError, lambda : getattr(o,op))
class TestIndexOps(Ops):
def setUp(self):
super(TestIndexOps, self).setUp()
self.is_valid_objs = [ o for o in self.objs if o._allow_index_ops ]
self.not_valid_objs = [ o for o in self.objs if not o._allow_index_ops ]
def test_ops(self):
tm._skip_if_not_numpy17_friendly()
for op in ['max','min']:
for o in self.objs:
result = getattr(o,op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(ordinal=getattr(o.values, op)(), freq=o.freq)
try:
self.assertEqual(result, expected)
except ValueError:
# comparing tz-aware series with np.array results in ValueError
expected = expected.astype('M8[ns]').astype('int64')
self.assertEqual(result.value, expected)
def test_nanops(self):
# GH 7261
for op in ['max','min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
self.assertEqual(getattr(obj, op)(), 2.0)
obj = klass([np.nan])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
def test_value_counts_unique_nunique(self):
for o in self.objs:
klass = type(o)
values = o.values
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
expected_s = Series(range(10, 0, -1), index=values[::-1], dtype='int64')
tm.assert_series_equal(o.value_counts(), expected_s)
if isinstance(o, DatetimeIndex):
# DatetimeIndex.unique returns DatetimeIndex
self.assertTrue(o.unique().equals(klass(values)))
else:
self.assert_numpy_array_equal(o.unique(), values)
self.assertEqual(o.nunique(), len(np.unique(o.values)))
for null_obj in [np.nan, None]:
for o in self.objs:
klass = type(o)
values = o.values
if o.values.dtype == 'int64':
# skips int64 because it doesn't allow to include nan or None
continue
if o.values.dtype == 'datetime64[ns]' and _np_version_under1p7:
# Unable to assign None
continue
# special assign to the numpy array
if o.values.dtype == 'datetime64[ns]':
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
if isinstance(o, DatetimeIndex):
expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1])
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1])
else:
expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64')
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64')
tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na)
tm.assert_series_equal(o.value_counts(), expected_s)
# numpy_array_equal cannot compare arrays includes nan
result = o.unique()
self.assert_numpy_array_equal(result[1:], values[2:])
if isinstance(o, DatetimeIndex):
self.assertTrue(result[0] is pd.NaT)
else:
self.assertTrue(pd.isnull(result[0]))
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(dropna=False), 9)
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.unique(s_values))
self.assertEqual(s.nunique(), 4)
# don't sort, have to sort after the fact as not sorting is platform-dep
hist = s.value_counts(sort=False)
hist.sort()
expected = Series([3, 1, 4, 2], index=list('acbd'))
expected.sort()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
# bins
self.assertRaises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({0.998: 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({0.998: 1.0})
tm.assert_series_equal(res1n, exp1n)
self.assert_numpy_array_equal(s1.unique(), np.array([1, 2, 3]))
self.assertEqual(s1.nunique(), 3)
res4 = s1.value_counts(bins=4)
exp4 = Series({0.998: 2, 1.5: 1, 2.0: 0, 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series({0.998: 0.5, 1.5: 0.25, 2.0: 0.0, 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan, 'd', 'd', 'a', 'a', 'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O'))
self.assertEqual(s.nunique(), 3)
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array([]))
self.assertEqual(s.nunique(), 0)
# GH 3002, datetime64[ns]
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', 'xxyyzz20100101EGG',
'xxyyww20090101EGG', 'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z', '2009-01-01 00:00:00X'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np.array(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z', '2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, DatetimeIndex):
expected = DatetimeIndex(expected)
self.assertTrue(s.unique().equals(expected))
else:
self.assert_numpy_array_equal(s.unique(), expected)
self.assertEqual(s.nunique(), 3)
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
self.assertEqual(result.index.dtype, 'datetime64[ns]')
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
self.assertEqual(unique.dtype, 'datetime64[ns]')
# numpy_array_equal cannot compare pd.NaT
self.assert_numpy_array_equal(unique[:3], expected)
self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') == pd.tslib.iNaT)
self.assertEqual(s.nunique(), 3)
self.assertEqual(s.nunique(dropna=False), 4)
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td)
result = td.value_counts()
expected_s = Series([6], index=[86400000000000])
self.assertEqual(result.index.dtype, 'int64')
tm.assert_series_equal(result, expected_s)
# get nanoseconds to compare
expected = np.array([86400000000000])
self.assert_numpy_array_equal(td.unique(), expected)
self.assertEqual(td.nunique(), 1)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2)
result2 = td2.value_counts()
self.assertEqual(result2.index.dtype, 'int64')
tm.assert_series_equal(result2, expected_s)
self.assert_numpy_array_equal(td.unique(), expected)
self.assertEqual(td.nunique(), 1)
def test_factorize(self):
for o in self.objs:
exp_arr = np.array(range(len(o)))
labels, uniques = o.factorize()
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.values)
self.assert_numpy_array_equal(uniques, expected)
else:
self.assertTrue(uniques.equals(o))
for o in self.objs:
# sort by value, and create duplicates
if isinstance(o, Series):
o.sort()
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
labels, uniques = n.factorize(sort=True)
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.values)
self.assert_numpy_array_equal(uniques, expected)
else:
self.assertTrue(uniques.equals(o))
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4])
labels, uniques = n.factorize(sort=False)
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(np.concatenate([o.values[5:10], o.values[:5]]))
self.assert_numpy_array_equal(uniques, expected)
else:
expected = o[5:].append(o[:5])
self.assertTrue(uniques.equals(expected))
class TestDatetimeIndexOps(Ops):
_allowed = '_allow_datetime_index_ops'
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: x._allow_datetime_index_ops or x._allow_period_index_ops
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year','day','second','weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
# attribute access should still work!
s = Series(dict(year=2000,month=1,day=10))
self.assertEquals(s.year,2000)
self.assertEquals(s.month,1)
self.assertEquals(s.day,10)
self.assertRaises(AttributeError, lambda : s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
import pandas as pd
def convert_to_datetime_idx_df(data):
df = pd.DataFrame(data)
# Assumed intent from the function name: parse the existing index into a DatetimeIndex.
df.index = pd.to_datetime(df.index)
return df
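# Illustrative use (hypothetical payload): string dates in the nested-dict keys
# become the row index, which is then parsed into a DatetimeIndex.
# convert_to_datetime_idx_df({"value": {"2020-01-01": 1.0, "2020-01-02": 2.0}})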
"""The Model class is the main object for creating model in Pastas.
Examples
--------
>>> oseries = pd.Series([1,2,1], index=pd.to_datetime(range(3), unit="D"))
>>> ml = Model(oseries)
"""
from collections import OrderedDict
from copy import copy
from inspect import isclass
from logging import getLogger
from os import getlogin
import numpy as np
import pandas as pd
from .decorators import get_stressmodel
from .io.base import dump, load_model
from .modelstats import Statistics
from .noisemodels import NoiseModel
from .plots import Plotting
from .solver import LeastSquares
from .stressmodels import Constant
from .timeseries import TimeSeries
from .utils import get_dt, get_time_offset, get_sample, \
frequency_is_supported, validate_name
from .version import __version__
class Model:
"""Initiates a time series model.
Parameters
----------
oseries: pandas.Series or pastas.TimeSeries
pandas Series object containing the dependent time series. The
observations can be non-equidistant.
constant: bool, optional
Add a constant to the model (Default=True).
noisemodel: bool, optional
Add the default noisemodel to the model. A custom noisemodel can be
added later in the modelling process as well.
name: str, optional
String with the name of the model, used in plotting and saving.
metadata: dict, optional
Dictionary containing metadata of the oseries, passed on to the
oseries when creating a pastas TimeSeries object. Hence,
ml.oseries.metadata will give you the metadata.
Returns
-------
ml: pastas.Model
Pastas Model instance, the base object in Pastas.
Examples
--------
>>> oseries = pd.Series([1,2,1], index=pd.to_datetime(range(3), unit="D"))
>>> ml = Model(oseries)
"""
def __init__(self, oseries, constant=True, noisemodel=True, name=None,
metadata=None):
self.logger = getLogger(__name__)
# Construct the different model components
self.oseries = TimeSeries(oseries, settings="oseries",
metadata=metadata)
if name is None:
name = self.oseries.name
if name is None:
name = 'Observations'
self.name = validate_name(name)
self.parameters = pd.DataFrame(
columns=["initial", "name", "optimal", "pmin", "pmax", "vary",
"stderr"])
# Define the model components
self.stressmodels = OrderedDict()
self.constant = None
self.transform = None
self.noisemodel = None
# Default solve/simulation settings
self.settings = {
"tmin": None,
"tmax": None,
"freq": "D",
"warmup": pd.Timedelta(days=3650),
"time_offset": pd.Timedelta(0),
"noise": noisemodel,
"solver": None,
"fit_constant": True,
}
if constant:
constant = Constant(initial=self.oseries.series.mean(),
name="constant")
self.add_constant(constant)
if noisemodel:
self.add_noisemodel(NoiseModel())
# File Information
self.file_info = self.get_file_info()
# initialize some attributes for solving and simulation
self.sim_index = None
self.oseries_calib = None
self.interpolate_simulation = None
self.normalize_residuals = False
self.fit = None
# Load other modules
self.stats = Statistics(self)
self.plots = Plotting(self)
self.plot = self.plots.plot # because we are lazy
def __repr__(self):
"""Prints a simple string representation of the model.
"""
template = ('{cls}(oseries={os}, name={name}, constant={const}, '
'noisemodel={noise})')
return template.format(cls=self.__class__.__name__,
os=self.oseries.name,
name=self.name,
const=self.constant is not None,
noise=self.noisemodel is not None)
def add_stressmodel(self, stressmodel, *args, replace=False):
"""Adds a stressmodel to the main model.
Parameters
----------
stressmodel: pastas.stressmodel.stressmodelBase
instance of a pastas.stressmodel object. Multiple stress models
can be provided (e.g., ml.add_stressmodel(sm1, sm2)) in one call.
replace: bool, optional
replace the stressmodel if a stressmodel with the same name
already exists. Not recommended but useful at times. Default is
False.
Notes
-----
To obtain a list of the stressmodel names, type:
>>> ml.stressmodels.keys()
Examples
--------
>>> sm = ps.StressModel(stress, rfunc=ps.Gamma, name="stress")
>>> ml.add_stressmodel(sm)
"""
# Method can take multiple stressmodels at once through args
if args:
for arg in args:
self.add_stressmodel(arg)
if (stressmodel.name in self.stressmodels.keys()) and not replace:
self.logger.error("The name for the stressmodel you are trying "
"to add already exists for this model. Select "
"another name.")
else:
self.stressmodels[stressmodel.name] = stressmodel
self.parameters = self.get_init_parameters(initial=False)
if self.settings["freq"] is None:
self._set_freq()
stressmodel.update_stress(freq=self.settings["freq"])
# Check if stress overlaps with oseries, if not give a warning
if (stressmodel.tmin > self.oseries.series.index.max()) or \
(stressmodel.tmax < self.oseries.series.index.min()):
self.logger.warning("The stress of the stressmodel has no "
"overlap with ml.oseries.")
def add_constant(self, constant):
"""Adds a Constant to the time series Model.
Parameters
----------
constant: pastas.Constant
Pastas constant instance, possibly more things in the future.
Examples
--------
>>> d = ps.Constant()
>>> ml.add_constant(d)
"""
self.constant = constant
self.parameters = self.get_init_parameters(initial=False)
def add_transform(self, transform):
"""Adds a Transform to the time series Model.
Parameters
----------
transform: pastas.transform
instance of a pastas.transform object.
Examples
--------
>>> tt = ps.ThresholdTransform()
>>> ml.add_transform(tt)
"""
if isclass(transform):
# keep this line for backwards compatibility for now
transform = transform()
transform.set_model(self)
self.transform = transform
self.parameters = self.get_init_parameters(initial=False)
def add_noisemodel(self, noisemodel):
"""Adds a noisemodel to the time series Model.
Parameters
----------
noisemodel: pastas.noisemodels.NoiseModelBase
Instance of NoiseModelBase
Examples
--------
>>> n = ps.NoiseModel()
>>> ml.add_noisemodel(n)
"""
self.noisemodel = noisemodel
self.noisemodel.set_init_parameters(oseries=self.oseries.series)
self.parameters = self.get_init_parameters(initial=False)
# check whether noise_alpha is not smaller than ml.settings["freq"]
freq_in_days = get_dt(self.settings["freq"])
noise_alpha = self.noisemodel.parameters.initial.iloc[0]
if freq_in_days > noise_alpha:
self.set_initial("noise_alpha", freq_in_days)
@get_stressmodel
def del_stressmodel(self, name):
""" Safely delete a stressmodel from the stressmodels dict.
Parameters
----------
name: str
string with the name of the stressmodel object.
Notes
-----
To obtain a list of the stressmodel names type:
>>> ml.stressmodels.keys()
"""
self.stressmodels.pop(name, None)
self.parameters = self.get_init_parameters(initial=False)
def del_constant(self):
""" Safely delete the constant from the Model.
"""
if self.constant is None:
self.logger.warning("No constant is present in this model.")
else:
self.constant = None
self.parameters = self.get_init_parameters(initial=False)
def del_transform(self):
"""Safely delete the transform from the Model.
"""
if self.transform is None:
self.logger.warning("No transform is present in this model.")
else:
self.transform = None
self.parameters = self.get_init_parameters(initial=False)
def del_noisemodel(self):
"""Safely delete the noisemodel from the Model.
"""
if self.noisemodel is None:
self.logger.warning("No noisemodel is present in this model.")
else:
self.noisemodel = None
self.parameters = self.get_init_parameters(initial=False)
def simulate(self, parameters=None, tmin=None, tmax=None, freq=None,
warmup=None, return_warmup=False):
"""Method to simulate the time series model.
Parameters
----------
parameters: array-like, optional
Array with the parameters used in the time series model. See
Model.get_parameters() for more info if parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days).
return_warmup: bool, optional
Return the simulation including the warmup period or not,
default is False.
Returns
-------
sim: pandas.Series
pandas.Series containing the simulated time series
Notes
-----
This method can be used without any parameters. When the model is
solved, the optimal parameter values are used and if not,
the initial parameter values are used. This allows the user to
get an idea of how the simulation looks with only the initial
parameters and no calibration.
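Examples
--------
A minimal illustrative call; the dates are placeholders, not from the source:

>>> sim = ml.simulate(tmin="2000", tmax="2010")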
"""
# Default options when tmin, tmax, freq and warmup are not provided.
if tmin is None and self.settings['tmin']:
tmin = self.settings['tmin']
else:
tmin = self.get_tmin(tmin, freq, use_oseries=False,
use_stresses=True)
if tmax is None and self.settings['tmax']:
tmax = self.settings['tmax']
else:
tmax = self.get_tmax(tmax, freq, use_oseries=False,
use_stresses=True)
if freq is None:
freq = self.settings["freq"]
if warmup is None:
warmup = self.settings["warmup"]
elif not isinstance(warmup, pd.Timedelta):
warmup = pd.Timedelta(days=warmup)
# Get the simulation index and the time step
sim_index = self.get_sim_index(tmin, tmax, freq, warmup)
dt = get_dt(freq)
# Get parameters if none are provided
if parameters is None:
parameters = self.get_parameters()
sim = pd.Series(data=np.zeros(sim_index.size, dtype=float),
index=sim_index, fastpath=True)
istart = 0 # Track parameters index to pass to stressmodel object
for sm in self.stressmodels.values():
contrib = sm.simulate(parameters[istart: istart + sm.nparam],
sim_index.min(), sim_index.max(), freq, dt)
sim = sim.add(contrib)
istart += sm.nparam
if self.constant:
sim = sim + self.constant.simulate(parameters[istart])
istart += 1
if self.transform:
sim = self.transform.simulate(sim, parameters[
istart:istart + self.transform.nparam])
# Respect provided tmin/tmax at this point, since warmup matters for
# simulation but should not be returned, unless return_warmup=True.
if not return_warmup:
sim = sim.loc[tmin:tmax]
if sim.hasnans:
sim = sim.dropna()
self.logger.warning('Nan-values were removed from the simulation.')
sim.name = 'Simulation'
return sim
def residuals(self, parameters=None, tmin=None, tmax=None, freq=None,
warmup=None):
"""Method to calculate the residual series.
Parameters
----------
parameters: list, optional
Array of the parameters used in the time series model. See
Model.get_parameters() for more info if parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days).
Returns
-------
res: pandas.Series
pandas.Series with the residuals series.
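Examples
--------
Illustrative call on a solved model (assumes ml.solve() has been run):

>>> res = ml.residuals()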
"""
# Default options when tmin, tmax, freq and warmup are not provided.
if tmin is None:
tmin = self.settings['tmin']
if tmax is None:
tmax = self.settings['tmax']
if freq is None:
freq = self.settings["freq"]
if warmup is None:
warmup = self.settings["warmup"]
else:
warmup = pd.Timedelta(days=warmup)
# simulate model
sim = self.simulate(parameters, tmin, tmax, freq, warmup,
return_warmup=False)
# Get the oseries calibration series
oseries_calib = self.observations(tmin, tmax, freq)
# Get simulation at the correct indices
if self.interpolate_simulation is None:
if oseries_calib.index.difference(sim.index).size != 0:
self.interpolate_simulation = True
self.logger.info('There are observations between the '
'simulation timesteps. Linear interpolation '
'between simulated values is used.')
if self.interpolate_simulation:
# interpolate simulation to times of observations
sim_interpolated = np.interp(oseries_calib.index.asi8,
sim.index.asi8, sim.values)
else:
# all of the observation indexes are in the simulation
sim_interpolated = sim.reindex(oseries_calib.index)
# Calculate the actual residuals here
res = oseries_calib.subtract(sim_interpolated)
if res.hasnans:
res = res.dropna()
self.logger.warning('Nan-values were removed from the residuals.')
if self.normalize_residuals:
res = res - res.values.mean()
res.name = "Residuals"
return res
def noise(self, parameters=None, tmin=None, tmax=None, freq=None,
warmup=None):
"""Method to simulate the noise when a noisemodel is present.
Parameters
----------
parameters: list, optional
Array of the parameters used in the time series model. See
Model.get_parameters() for more info if parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days).
Returns
-------
noise : pandas.Series
Pandas series of the noise.
Notes
-----
The noise is the time series that results from applying a noise
model to the residuals.
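Examples
--------
Illustrative call, assuming a noisemodel is present and the model is solved:

>>> v = ml.noise()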
"""
if (self.noisemodel is None) or (self.settings["noise"] is False):
self.logger.error("Noise cannot be calculated if there is no "
"noisemodel present or is not used during "
"parameter estimation.")
return None
if freq is None:
freq = self.settings["freq"]
# Get parameters if none are provided
if parameters is None:
parameters = self.get_parameters()
# Calculate the residuals
res = self.residuals(parameters, tmin, tmax, freq, warmup)
# Calculate the noise
noise = self.noisemodel.simulate(res,
parameters[-self.noisemodel.nparam:])
return noise
def observations(self, tmin=None, tmax=None, freq=None,
update_observations=False):
"""Method that returns the observations series used for calibration.
Parameters
----------
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
update_observations : bool, optional
if True, force recalculation of the observations series, default
is False
Returns
-------
oseries_calib: pandas.Series
pandas series of the oseries used for calibration of the model
Notes
-----
This method makes sure the simulation is compared to the nearest
observation. It finds the index closest to sim_index, and then returns
a selection of the oseries. In the residuals method, the simulation is
interpolated to the observation-timestamps.
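Examples
--------
Illustrative call; the dates are placeholders:

>>> obs = ml.observations(tmin="2000", tmax="2010")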
"""
if tmin is None and self.settings['tmin']:
tmin = self.settings['tmin']
else:
tmin = self.get_tmin(tmin, freq, use_oseries=False,
use_stresses=True)
if tmax is None and self.settings['tmax']:
tmax = self.settings['tmax']
else:
tmax = self.get_tmax(tmax, freq, use_oseries=False,
use_stresses=True)
if freq is None:
freq = self.settings["freq"]
for key, setting in zip([tmin, tmax, freq], ["tmin", "tmax", "freq"]):
if key != self.settings[setting]:
update_observations = True
if self.oseries_calib is None or update_observations:
oseries_calib = self.oseries.series.loc[tmin:tmax]
# sample measurements, so that frequency is not higher than model
# keep the original timestamps, as they will be used during
# interpolation of the simulation
sim_index = self.get_sim_index(tmin, tmax, freq,
self.settings["warmup"])
if not oseries_calib.empty:
index = get_sample(oseries_calib.index, sim_index)
oseries_calib = oseries_calib.loc[index]
else:
oseries_calib = self.oseries_calib
return oseries_calib
def initialize(self, tmin=None, tmax=None, freq=None, warmup=None,
noise=None, weights=None, initial=True, fit_constant=None):
"""Method to initialize the model.
This method is called by the solve-method, but can also be triggered
manually. See the solve-method for a description of the arguments.
"""
if noise is None and self.noisemodel:
noise = True
elif noise is True and self.noisemodel is None:
self.logger.warning("""Warning, solving with noisemodel while no
noisemodel is defined. No noisemodel is used.""")
noise = False
self.settings["noise"] = noise
self.settings["weights"] = weights
# Set the frequency & warmup
if freq:
self.settings["freq"] = frequency_is_supported(freq)
if warmup is not None:
self.settings["warmup"] = pd.Timedelta(days=warmup)
# Set the time offset from the frequency (this does not work as expected yet)
# self._set_time_offset()
# Set tmin and tmax
self.settings["tmin"] = self.get_tmin(tmin)
self.settings["tmax"] = self.get_tmax(tmax)
# set fit_constant
if fit_constant is not None:
self.settings["fit_constant"] = fit_constant
# make sure calibration data is renewed
self.sim_index = self.get_sim_index(self.settings["tmin"],
self.settings["tmax"],
self.settings["freq"],
self.settings["warmup"],
update_sim_index=True)
self.oseries_calib = self.observations(tmin=self.settings["tmin"],
tmax=self.settings["tmax"],
freq=self.settings["freq"],
update_observations=True)
self.interpolate_simulation = None
# Initialize parameters
self.parameters = self.get_init_parameters(noise, initial)
# Prepare model if not fitting the constant as a parameter
if not self.settings["fit_constant"]:
self.parameters.loc["constant_d", "vary"] = False
self.parameters.loc["constant_d", "initial"] = 0.0
self.normalize_residuals = True
def solve(self, tmin=None, tmax=None, freq=None, warmup=None, noise=True,
solver=None, report=True, initial=True, weights=None,
fit_constant=True, **kwargs):
"""Method to solve the time series model.
Parameters
----------
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days) for which the simulation is calculated,
but not used for the calibration period.
noise: bool, optional
Argument that determines if a noisemodel is used (only if
present). The default is noise=True.
solver: pastas.solver.BaseSolver class, optional
Class used to solve the model. Options are: ps.LeastSquares
(default) or ps.LmfitSolve. A class is needed, not an instance
of the class!
report: bool, optional
Print a report to the screen after optimization finished. This
can also be manually triggered after optimization by calling
print(ml.fit_report()) on the Pastas model instance.
initial: bool, optional
Reset initial parameters from the individual stressmodels.
Default is True. If False, the optimal values from an earlier
optimization are used.
weights: pandas.Series, optional
Pandas Series with values by which the residuals are multiplied,
index-based.
fit_constant: bool, optional
Argument that determines if the constant is fitted as a parameter.
If it is set to False, the constant is set equal to the mean of
the residuals.
**kwargs: dict, optional
All keyword arguments will be passed onto minimization method
from the solver. It depends on the solver used which arguments
can be used.
Notes
-----
- The solver object, including some results, is stored as ml.fit. From
here one can access the covariance (ml.fit.pcov) and correlation
matrix (ml.fit.pcor).
- Each solver returns a number of results after optimization. These
solver specific results are stored in ml.fit.result and can be
accessed from there.
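Examples
--------
A minimal illustrative call; the keyword values are placeholders:

>>> ml.solve(tmin="1980", tmax="2010", noise=True)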
"""
# Initialize the model
self.initialize(tmin, tmax, freq, warmup, noise, weights, initial,
fit_constant)
if self.oseries_calib.empty:
raise ValueError("Calibration series 'oseries_calib' is empty! "
"Check 'tmin' or 'tmax'.")
# Store the solve instance
if solver is None:
if self.fit is None:
self.fit = LeastSquares(ml=self)
elif not issubclass(solver, self.fit.__class__):
self.fit = solver(ml=self)
self.settings["solver"] = self.fit._name
# Solve model
success, optimal, stderr = self.fit.solve(noise=noise, weights=weights,
**kwargs)
if not success:
self.logger.warning("Model parameters could not be estimated "
"well.")
if not self.settings['fit_constant']:
# Determine the residuals and set the constant to their mean
self.normalize_residuals = False
res = self.residuals(optimal).mean()
optimal[self.parameters.name == self.constant.name] = res
self.parameters.optimal = optimal
self.parameters.stderr = stderr
if report:
print(self.fit_report())
def set_initial(self, name, value, move_bounds=False):
"""Method to set the initial value of any parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
parameters value to use as initial estimate.
move_bounds: bool, optional
Reset pmin/pmax based on new initial value.
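Examples
--------
Illustrative call using the constant's parameter name from this module:

>>> ml.set_initial("constant_d", 10.0)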
"""
if move_bounds:
factor = value / self.parameters.loc[name, 'initial']
min_new = self.parameters.loc[name, 'pmin'] * factor
self.set_parameter(name, min_new, 'pmin')
max_new = self.parameters.loc[name, 'pmax'] * factor
self.set_parameter(name, max_new, 'pmax')
self.set_parameter(name, value, "initial")
def set_vary(self, name, value):
"""Method to set if the parameter is allowed to vary.
Parameters
----------
name: str
name of the parameter to update.
value: bool
boolean to vary a parameter (True) or not (False).
"""
self.set_parameter(name, bool(value), "vary")
def set_pmin(self, name, value):
"""Method to set the minimum value of a parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
minimum value for the parameter.
"""
self.set_parameter(name, value, "pmin")
def set_pmax(self, name, value):
"""Method to set the maximum values of a parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
maximum value for the parameter.
"""
self.set_parameter(name, value, "pmax")
def set_parameter(self, name, value, kind):
"""Internal method to set the parameter value for some kind.
"""
if name not in self.parameters.index:
msg = "parameter {} is not present in the model".format(name)
self.logger.error(msg)
raise KeyError(msg)
cat = self.parameters.loc[name, "name"]
# Because either of the following is not necessarily present
noisemodel = self.noisemodel.name if self.noisemodel else "NotPresent"
constant = self.constant.name if self.constant else "NotPresent"
if cat in self.stressmodels.keys():
self.stressmodels[cat].__getattribute__("set_" + kind)(name, value)
self.parameters.loc[name, kind] = value
elif cat == noisemodel:
self.noisemodel.__getattribute__("set_" + kind)(name, value)
self.parameters.loc[name, kind] = value
elif cat == constant:
self.constant.__getattribute__("set_" + kind)(name, value)
self.parameters.loc[name, kind] = value
def _set_freq(self):
"""Internal method to set the frequency in the settings. This is
method is not yet applied and is for future development.
"""
freqs = set()
if self.oseries.freq:
# when the oseries has a constant frequency, use this
freqs.add(self.oseries.freq)
else:
# otherwise determine frequency from the stressmodels
for stressmodel in self.stressmodels.values():
if stressmodel.stress:
for stress in stressmodel.stress:
if stress.settings['freq']:
# first check the frequency, and use this
freqs.add(stress.settings['freq'])
elif stress.freq_original:
# if this is not available, and the original frequency is, take the original frequency
freqs.add(stress.freq_original)
if len(freqs) == 1:
# if there is only one frequency, use this frequency
self.settings["freq"] = next(iter(freqs))
elif len(freqs) > 1:
# if there are more frequencies, take the highest frequency (lowest dt)
freqs = list(freqs)
dt = np.array([get_dt(f) for f in freqs])
self.settings["freq"] = freqs[np.argmin(dt)]
else:
self.logger.info("Frequency of model cannot be determined. "
"Frequency is set to daily")
self.settings["freq"] = "D"
def _set_time_offset(self):
"""Internal method to set the time offset for the model class.
Notes
-----
Method to check if the StressModel timestamps match (e.g. similar hours)
"""
time_offsets = set()
for stressmodel in self.stressmodels.values():
for st in stressmodel.stress:
if st.freq_original:
# calculate the offset from the default frequency
time_offset = get_time_offset(
st.series_original.index.min(),
self.settings["freq"])
time_offsets.add(time_offset)
if len(time_offsets) > 1:
msg = (
"The time-differences with the default frequency is not the "
"same for all stresses.")
self.logger.error(msg)
raise (Exception(msg))
if len(time_offsets) == 1:
self.settings["time_offset"] = next(iter(time_offsets))
else:
self.settings["time_offset"] = pd.Timedelta(0)
def get_stressmodel_names(self):
"""Returns list of stressmodel names"""
return list(self.stressmodels.keys())
def get_sim_index(self, tmin, tmax, freq, warmup, update_sim_index=False):
"""Internal method to get the simulation index, including the warmup.
Parameters
----------
tmin: str
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int
Warmup period (in Days).
update_sim_index : bool, optional
if True, force recalculation of sim_index, default is False
Returns
-------
sim_index: pandas.DatetimeIndex
Pandas DatetimeIndex instance with the datetimes values for
which the model is simulated.
"""
# Check if any of the settings are updated
for key, setting in zip([tmin, tmax, freq, warmup],
["tmin", "tmax", "freq", "warmup"]):
if key != self.settings[setting]:
update_sim_index = True
if self.sim_index is None or update_sim_index:
tmin = (tmin - warmup).floor(freq) + self.settings["time_offset"]
sim_index = pd.date_range(tmin, tmax, freq=freq)
else:
sim_index = self.sim_index
return sim_index
def get_tmin(self, tmin=None, freq=None, use_oseries=True,
use_stresses=False):
"""Method that checks and returns valid values for tmin.
Parameters
----------
tmin: str, optional
string with a year or date that can be turned into a pandas
Timestamp (e.g. pd.Timestamp(tmin)).
freq: str, optional
string with the frequency.
use_oseries: bool, optional
Obtain the tmin and tmax from the oseries. Default is True.
use_stresses: bool, optional
Obtain the tmin and tmax from the stresses. The minimum/maximum
time from all stresses is taken.
Returns
-------
tmin: pandas.Timestamp
returns pandas timestamps for tmin.
Notes
-----
The parameters tmin and tmax are leading, unless use_oseries is
True, then these are checked against the oseries index. The tmin and
tmax are checked and returned according to the following rules:
A. If no value for tmin is provided:
1. If use_oseries is True, tmin is based on the oseries.
2. If use_stresses is True, tmin is based on the stressmodels.
B. If a value for tmin is provided:
1. A pandas timestamp is made from the string
2. if use_oseries is True, tmin is checked against oseries.
C. In all cases an offset for the tmin is added.
A detailed description of dealing with tmin and timesteps in general
can be found in the developers section of the docs.
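Examples
--------
Illustrative call; the date string is a placeholder:

>>> tmin = ml.get_tmin("2000")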
"""
# Get tmin from the oseries
if use_oseries:
ts_tmin = self.oseries.series.index.min()
# Get tmin from the stressmodels
elif use_stresses:
ts_tmin = pd.Timestamp.max
for stressmodel in self.stressmodels.values():
if stressmodel.tmin < ts_tmin:
ts_tmin = stressmodel.tmin
# Get tmin and tmax from user provided values
else:
ts_tmin = pd.Timestamp(tmin)
# Set tmin properly
if tmin is not None and use_oseries:
tmin = max(pd.Timestamp(tmin), ts_tmin)
elif tmin is not None:
tmin = pd.Timestamp(tmin)
else:
tmin = ts_tmin
# adjust tmin and tmax so that the time-offset is equal to the stressmodels.
if freq is None:
freq = self.settings["freq"]
tmin = tmin.floor(freq) + self.settings["time_offset"]
return tmin
def get_tmax(self, tmax=None, freq=None, use_oseries=True,
use_stresses=False):
"""Method that checks and returns valid values for tmin and tmax.
Parameters
----------
tmax: str, optional
string with a year or date that can be turned into a pandas
Timestamp (e.g. pd.Timestamp(tmax)).
freq: str, optional
string with the frequency.
use_oseries: bool, optional
Obtain the tmin and tmax from the oseries. Default is True.
use_stresses: bool, optional
Obtain the tmin and tmax from the stresses. The minimum/maximum
time from all stresses is taken.
Returns
-------
tmax: pandas.Timestamp
returns pandas timestamps for tmax.
Notes
-----
The parameters tmin and tmax are leading, unless use_oseries is
True, then these are checked against the oseries index. The tmin and
tmax are checked and returned according to the following rules:
A. If no value for tmax is provided:
1. If use_oseries is True, tmax is based on the
oseries.
2. If use_stresses is True, tmax is based on the
stressmodels.
B. If a value for tmax is provided:
1. A pandas timestamp is made from the string
2. if use_oseries is True, tmax is checked against oseries.
C. In all cases an offset for the tmax is added.
A detailed description of dealing with tmax and timesteps
in general can be found in the developers section of the docs.
"""
# Get tmax from the oseries
if use_oseries:
ts_tmax = self.oseries.series.index.max()
# Get tmax from the stressmodels
elif use_stresses:
ts_tmax = pd.Timestamp.min
for stressmodel in self.stressmodels.values():
if stressmodel.tmax > ts_tmax:
ts_tmax = stressmodel.tmax
# Get tmax from user provided values
else:
ts_tmax = pd.Timestamp(tmax)
# Set tmax properly
if tmax is not None and use_oseries:
tmax = min(pd.Timestamp(tmax), ts_tmax)
elif tmax is not None:
tmax = pd.Timestamp(tmax)
else:
tmax = ts_tmax
# adjust tmax so that the time-offset is equal to the stressmodels.
if freq is None:
freq = self.settings["freq"]
tmax = tmax.floor(freq) + self.settings["time_offset"]
return tmax
def get_init_parameters(self, noise=None, initial=True):
"""Method to get all initial parameters from the individual objects.
Parameters
----------
noise: bool, optional
Add the parameters for the noisemodel to the parameters
Dataframe or not.
initial: bool, optional
True to get initial parameters, False to get optimized parameters.
Returns
-------
parameters: pandas.DataFrame
pandas.Dataframe with the parameters.
"""
if noise is None:
noise = self.settings['noise']
parameters = pd.DataFrame(columns=["initial", "name", "optimal",
"pmin", "pmax", "vary", "stderr"])
for sm in self.stressmodels.values():
parameters = parameters.append(sm.parameters, sort=False)
if self.constant:
parameters = parameters.append(self.constant.parameters,
sort=False)
if self.transform:
parameters = parameters.append(self.transform.parameters,
sort=False)
if self.noisemodel and noise:
parameters = parameters.append(self.noisemodel.parameters,
sort=False)
# Set initial parameters to optimal parameters from model
if not initial:
paramold = self.parameters.optimal
parameters.initial.update(paramold)
parameters.optimal.update(paramold)
return parameters
def get_parameters(self, name=None):
"""Internal method to obtain the parameters needed for calculation.
This method is used by the simulation, residuals and the noise
methods as well as other methods that need parameter values as arrays.
Parameters
----------
name: str, optional
string with the name of the pastas.stressmodel object.
Returns
-------
p: numpy.ndarray
Numpy array with the parameters used in the time series model.
"""
if name:
p = self.parameters.loc[self.parameters.name == name]
else:
p = self.parameters
if p.optimal.hasnans:
self.logger.warning(
"Model is not optimized yet, initial parameters are used.")
parameters = p.initial
else:
parameters = p.optimal
return parameters.values
@get_stressmodel
def get_contribution(self, name, tmin=None, tmax=None, freq=None,
warmup=None, istress=None, return_warmup=False,
parameters=None):
"""Method to get the contribution of a stressmodel.
Parameters
----------
name: str
String with the name of the stressmodel.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days).
istress: int, optional
When multiple stresses are present in a stressmodel, this keyword
can be used to obtain the contribution of an individual stress.
return_warmup: bool, optional
Include warmup in contribution calculation or not.
parameters: list or numpy.ndarray
iterable with the parameters. If none, the optimal parameters are
used when available, initial otherwise.
Returns
-------
contrib: pandas.Series
Pandas Series with the contribution.
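Examples
--------
Illustrative call; "recharge" stands in for whatever stressmodel name is
present in ml.stressmodels:

>>> contrib = ml.get_contribution("recharge")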
"""
if parameters is None:
parameters = self.get_parameters(name)
if tmin is None:
tmin = self.settings['tmin']
if tmax is None:
tmax = self.settings['tmax']
if freq is None:
freq = self.settings["freq"]
if warmup is None:
warmup = self.settings["warmup"]
else:
warmup = pd.Timedelta(days=warmup)
# use warmup
if tmin:
tmin_warm = pd.Timestamp(tmin) - warmup
else:
tmin_warm = None
dt = get_dt(freq)
kwargs = {'tmin': tmin_warm, 'tmax': tmax, 'freq': freq, 'dt': dt}
if istress is not None:
kwargs['istress'] = istress
contrib = self.stressmodels[name].simulate(parameters, **kwargs)
# Respect provided tmin/tmax at this point, since warmup matters for
# simulation but should not be returned, unless return_warmup=True.
if not return_warmup:
contrib = contrib.loc[tmin:tmax]
return contrib
def get_contributions(self, split=True, **kwargs):
"""Method to get contributions of all stressmodels.
Parameters
----------
split: bool, optional
Split the stresses in multiple stresses when possible.
kwargs: any other arguments are passed to get_contribution
Returns
-------
contribs: list
a list of Pandas Series of the contributions.
"""
contribs = []
for name in self.stressmodels:
nsplit = self.stressmodels[name].get_nsplit()
if split and nsplit > 1:
for istress in range(nsplit):
contrib = self.get_contribution(name, istress=istress,
**kwargs)
contribs.append(contrib)
else:
contrib = self.get_contribution(name, **kwargs)
contribs.append(contrib)
return contribs
def get_transform_contribution(self, tmin=None, tmax=None):
"""Method to get the contribution of a transform.
Parameters
----------
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
Returns
-------
contrib: pandas.Series
Pandas Series with the contribution.
"""
sim = self.simulate(tmin=tmin, tmax=tmax)
# calculate what the simulation without the transform is
ml = copy(self)
ml.del_transform()
sim_org = ml.simulate(tmin=tmin, tmax=tmax)
return sim - sim_org
def get_response(self, block_or_step, name, parameters=None, dt=None,
add_0=False, **kwargs):
"""Internal method to compute the block and step response.
Parameters
----------
block_or_step: str
String with "step" or "block"
name: str
string with the name of the stressmodel
parameters: ndarray, optional
array with the parameters
dt: float, optional
timestep for the response function.
add_0: bool, optional
Add a zero at t=0.
kwargs
Returns
-------
response: pandas.Series
Pandas Series with the block or step response, indexed by time step.
"""
if not hasattr(self.stressmodels[name], "rfunc"):
raise TypeError("Stressmodel {} has no rfunc".format(name))
else:
block_or_step = getattr(self.stressmodels[name].rfunc,
block_or_step)
if parameters is None:
parameters = self.get_parameters(name)
if dt is None:
dt = get_dt(self.settings["freq"])
response = block_or_step(parameters, dt, **kwargs)
if add_0:
response = np.insert(response, 0, 0.0)
if isinstance(dt, np.ndarray):
t = dt
else:
t = np.linspace(dt, response.size * dt, response.size)
response = pd.Series(response, index=t, name=name)
return response
import pandas as pd
from scipy.stats import linregress
df = pd.read_csv('Data/selected_100_normalized_merged.csv')
personality_features = ['reputation', 'Openness', 'Conscientiousness', 'Extraversion', 'Agreeableness', 'Emotional range']
numeric_features = ['question_count', 'answer_count']
# print(linregress(df['gender_num'], df['total_cheap']).rvalue)
# print(type(linregress(df['gender_num'], df['total_cheap'])))
arr = []
for x in personality_features[:1]:
for y in numeric_features[:1]:
r = round(linregress(df[x], df[y]).rvalue, 5)
p = round(linregress(df[x], df[y]).pvalue, 5)
# print(x, y, r, p)
arr.append([x, r, p])
tf = pd.DataFrame(arr, columns=['x', 'R-value_answer', 'P-value_answer'])
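# A minimal sketch of how the resulting table might be inspected or persisted;
# the output path is hypothetical.
# print(tf)
# tf.to_csv('Data/personality_feature_correlations.csv', index=False)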
import glob
from shutil import copy2
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import pprint
import pandas as pd
import plotly.express as px
from plotly.subplots import make_subplots
prompt = lambda q : input("{} (y/n): ".format(q)).lower().strip()[:1] == "y"
def parse_log(filename, params, eval_k, cl_to_plot_id, target_measure, print_params, start_line=None, end_line=None):
res_map={}
errors = {}
losses = {}
MRRs = {}
MAPs = {}
AUCs = {}
prec = {}
rec = {}
f1 = {}
prec_at_k = {}
rec_at_k = {}
f1_at_k = {}
prec_cl = {}
rec_cl = {}
f1_cl = {}
prec_at_k_cl = {}
rec_at_k_cl = {}
f1_at_k_cl = {}
best_measure = {}
best_epoch = {}
target_metric_best = {}
last_test_ep={}
last_test_ep['precision'] = '-'
last_test_ep['recall'] = '-'
last_test_ep['F1'] = '-'
last_test_ep['AVG-precision'] = '-'
last_test_ep['AVG-recall'] = '-'
last_test_ep['AVG-F1'] = '-'
last_test_ep['precision@'+str(eval_k)] = '-'
last_test_ep['recall@'+str(eval_k)] = '-'
last_test_ep['F1@'+str(eval_k)] = '-'
last_test_ep['AVG-precision@'+str(eval_k)] = '-'
last_test_ep['AVG-recall@'+str(eval_k)] = '-'
last_test_ep['AVG-F1@'+str(eval_k)] = '-'
last_test_ep['mrr'] = '-'
last_test_ep['map'] = '-'
last_test_ep['auc'] = '-'
last_test_ep['best_epoch'] = -1
set_names = ['TRAIN', 'VALID', 'TEST']
finished = False
epoch = 0
metrics_names = ["error" ,
"loss" ,
"mrr" ,
"map" ,
"auc" ,
"gmauc" ,
"lp_map" ,
"lp_auc",
"1000_auc",
"1000_map",
"100_auc",
"100_map",
"10_auc",
"10_map",
"1_auc",
"1_map",
]
metrics = {metric: {} for metric in metrics_names}
for s in set_names:
for metric in metrics:
metrics[metric][s] = {}
prec[s] = {}
rec[s] = {}
f1[s] = {}
prec_at_k[s] = {}
rec_at_k[s] = {}
f1_at_k[s] = {}
prec_cl[s] = {}
rec_cl[s] = {}
f1_cl[s] = {}
prec_at_k_cl[s] = {}
rec_at_k_cl[s] = {}
f1_at_k_cl[s] = {}
best_measure[s] = 0
best_epoch[s] = -1
str_comments=''
str_comments1=''
exp_params={}
#print ("Start parsing: ",filename, 'starting at', start_line)
with open(filename) as f:
params_line=True
readlr=False
line_nr = 0
for line in f:
line_nr += 1
if start_line != None and start_line > line_nr:
continue
if end_line != None and end_line <= line_nr:
break
line=line.replace('INFO:root:','').replace('\n','')
if params_line: #print parameters
if "'learning_rate':" in line:
readlr=True
if not readlr:
str_comments+=line+'\n'
else:
str_comments1+=line+'\n'
if params_line: #print parameters
for p in params:
str_p='\''+p+'\': '
if str_p in line:
exp_params[p]=line.split(str_p)[1].split(',')[0]
if line=='':
params_line=False
if 'TRAIN epoch' in line or 'VALID epoch' in line or 'TEST epoch' in line:
set_name = line.split(' ')[1]
previous_epoch = epoch
epoch = int(line.split(' ')[3])
if set_name=='TEST':
last_test_ep['best_epoch'] = epoch
if epoch>=50000:
break
if previous_epoch > epoch and epoch == 1:
epoch = previous_epoch #used to distinguish between downstream and frozen decoder
break #A new training has started, e.g frozen encoder or downstream
#print('set_name', set_name, 'epoch', epoch)
if 'Number of parameters' in line:
res_map['num_gcn_params'] = int(line.split('GCN: ')[1].split(',')[0])
res_map['num_cls_params'] = int(line.split('Classifier: ')[1].split(',')[0])
res_map['num_total_params'] = int(line.split('Total: ')[1].split(',')[0])
assert(res_map['num_gcn_params'] + res_map['num_cls_params'] == res_map['num_total_params'])
if "mean" in line:
for metric in metrics:
if "mean {} ".format(metric) in line:
v=float(line.split('mean {} '.format(metric))[1].split(' ')[0])
metrics[metric][set_name][epoch]=v
if target_measure==metric:
if target_measure == 'loss':
is_better = v<best_measure[set_name]
else:
is_better = v>best_measure[set_name]
if is_better:
best_measure[set_name]=v
best_epoch[set_name]=epoch
if set_name=='TEST':
last_test_ep[metric] = v
if 'measures microavg' in line:
prec[set_name][epoch]=float(line.split('precision ')[1].split(' ')[0])
rec[set_name][epoch]=float(line.split('recall ')[1].split(' ')[0])
f1[set_name][epoch]=float(line.split('f1 ')[1].split(' ')[0])
if (target_measure=='avg_p' or target_measure=='avg_r' or target_measure=='avg_f1'):
if target_measure=='avg_p':
v=prec[set_name][epoch]
elif target_measure=='avg_r':
v=rec[set_name][epoch]
else: #F1
v=f1[set_name][epoch]
if v>best_measure[set_name]:
best_measure[set_name]=v
best_epoch[set_name]=epoch
if set_name=='TEST':
last_test_ep['AVG-precision'] = prec[set_name][epoch]
last_test_ep['AVG-recall'] = rec[set_name][epoch]
last_test_ep['AVG-F1'] = f1[set_name][epoch]
elif 'measures@'+str(eval_k)+' microavg' in line:
prec_at_k[set_name][epoch]=float(line.split('precision ')[1].split(' ')[0])
rec_at_k[set_name][epoch]=float(line.split('recall ')[1].split(' ')[0])
f1_at_k[set_name][epoch]=float(line.split('f1 ')[1].split(' ')[0])
if set_name=='TEST':
last_test_ep['AVG-precision@'+str(eval_k)] = prec_at_k[set_name][epoch]
last_test_ep['AVG-recall@'+str(eval_k)] = rec_at_k[set_name][epoch]
last_test_ep['AVG-F1@'+str(eval_k)] = f1_at_k[set_name][epoch]
elif 'measures for class ' in line:
cl=int(line.split('class ')[1].split(' ')[0])
if cl not in prec_cl[set_name]:
prec_cl[set_name][cl] = {}
rec_cl[set_name][cl] = {}
f1_cl[set_name][cl] = {}
prec_cl[set_name][cl][epoch]=float(line.split('precision ')[1].split(' ')[0])
rec_cl[set_name][cl][epoch]=float(line.split('recall ')[1].split(' ')[0])
f1_cl[set_name][cl][epoch]=float(line.split('f1 ')[1].split(' ')[0])
if (target_measure=='p' or target_measure=='r' or target_measure=='f1') and cl==cl_to_plot_id:
if target_measure=='p':
v=prec_cl[set_name][cl][epoch]
elif target_measure=='r':
v=rec_cl[set_name][cl][epoch]
else: #F1
v=f1_cl[set_name][cl][epoch]
if v>best_measure[set_name]:
best_measure[set_name]=v
best_epoch[set_name]=epoch
if set_name=='TEST':
last_test_ep['precision'] = prec_cl[set_name][cl][epoch]
last_test_ep['recall'] = rec_cl[set_name][cl][epoch]
last_test_ep['F1'] = f1_cl[set_name][cl][epoch]
elif 'measures@'+str(eval_k)+' for class ' in line:
cl=int(line.split('class ')[1].split(' ')[0])
if cl not in prec_at_k_cl[set_name]:
prec_at_k_cl[set_name][cl] = {}
rec_at_k_cl[set_name][cl] = {}
f1_at_k_cl[set_name][cl] = {}
prec_at_k_cl[set_name][cl][epoch]=float(line.split('precision ')[1].split(' ')[0])
rec_at_k_cl[set_name][cl][epoch]=float(line.split('recall ')[1].split(' ')[0])
f1_at_k_cl[set_name][cl][epoch]=float(line.split('f1 ')[1].split(' ')[0])
if (target_measure=='p@k' or target_measure=='r@k' or target_measure=='f1@k') and cl==cl_to_plot_id:
if target_measure=='p@k':
v=prec_at_k_cl[set_name][cl][epoch]
elif target_measure=='r@k':
v=rec_at_k_cl[set_name][cl][epoch]
else:
v=f1_at_k_cl[set_name][cl][epoch]
if v>best_measure[set_name]:
best_measure[set_name]=v
best_epoch[set_name]=epoch
if set_name=='TEST':
last_test_ep['precision@'+str(eval_k)] = prec_at_k_cl[set_name][cl][epoch]
last_test_ep['recall@'+str(eval_k)] = rec_at_k_cl[set_name][cl][epoch]
last_test_ep['F1@'+str(eval_k)] = f1_at_k_cl[set_name][cl][epoch]
if 'FINISHED' in line:
finished = True
if best_epoch['TEST']<0 and best_epoch['VALID']<0 or last_test_ep['best_epoch']<1:
# Nothing learned, it is useless, abort
print ('best_epoch<0: -> skip')
target_best = {}
target_best['TEST'] = 0
str_legend = 'useless'
str_results = 0
return res_map, exp_params, metrics, str_legend, str_results, target_best, finished, line_nr, epoch
if start_line == None:
# Will fail for frozen encoder and downstream runs, so only do this for the first parse
res_map['model'] = exp_params['model'].replace("'","")
str_params=(pprint.pformat(exp_params))
if print_params:
print ('str_params:\n', str_params)
if best_epoch['VALID']>=0:
best_ep = best_epoch['VALID']
#print ('Highest %s values among all epochs: TRAIN %0.4f\tVALID %0.4f\tTEST %0.4f' % (target_measure, best_measure['TRAIN'], best_measure['VALID'], best_measure['TEST']))
else:
best_ep = best_epoch['TEST']
#print ('Highest %s values among all epochs:\tTRAIN F1 %0.4f\tTEST %0.4f' % (target_measure, best_measure['TRAIN'], best_measure['TEST']))
use_latest_ep = True
try:
#print ('Values at best Valid Epoch (%d) for target class: TEST Precision %0.4f - Recall %0.4f - F1 %0.4f' % (best_ep, prec_cl['TEST'][cl_to_plot_id][best_ep],rec_cl['TEST'][cl_to_plot_id][best_ep],f1_cl['TEST'][cl_to_plot_id][best_ep]))
#print ('Values at best Valid Epoch (%d) micro-AVG: TEST Precision %0.4f - Recall %0.4f - F1 %0.4f' % (best_ep, prec['TEST'][best_ep],rec['TEST'][best_ep],f1['TEST'][best_ep]))
res_map['precision'] = prec_cl['TEST'][cl_to_plot_id][best_ep]
res_map['recall'] = rec_cl['TEST'][cl_to_plot_id][best_ep]
res_map['F1'] = f1_cl['TEST'][cl_to_plot_id][best_ep]
res_map['AVG-precision'] = prec['TEST'][best_ep]
res_map['AVG-recall'] = rec['TEST'][best_ep]
res_map['AVG-F1'] = f1['TEST'][best_ep]
except:
res_map['precision'] = last_test_ep['precision']
res_map['recall'] = last_test_ep['recall']
res_map['F1'] = last_test_ep['F1']
res_map['AVG-precision'] = last_test_ep['AVG-precision']
res_map['AVG-recall'] = last_test_ep['AVG-recall']
res_map['AVG-F1'] = last_test_ep['AVG-F1']
use_latest_ep = False
#print ('WARNING: last epoch not finished, use the previous one.')
try:
#print ('Values at best Valid Epoch (%d) for target class@%d: TEST Precision %0.4f - Recall %0.4f - F1 %0.4f' % (best_ep, eval_k, prec_at_k_cl['TEST'][cl_to_plot_id][best_ep],rec_at_k_cl['TEST'][cl_to_plot_id][best_ep],f1_at_k_cl['TEST'][cl_to_plot_id][best_ep]))
res_map['precision@'+str(eval_k)] = prec_at_k_cl['TEST'][cl_to_plot_id][best_ep]
res_map['recall@'+str(eval_k)] = rec_at_k_cl['TEST'][cl_to_plot_id][best_ep]
res_map['F1@'+str(eval_k)] = f1_at_k_cl['TEST'][cl_to_plot_id][best_ep]
#print ('Values at best Valid Epoch (%d) micro-AVG@%d: TEST Precision %0.4f - Recall %0.4f - F1 %0.4f' % (best_ep, eval_k, prec_at_k['TEST'][best_ep],rec_at_k['TEST'][best_ep],f1_at_k['TEST'][best_ep]))
res_map['AVG-precision@'+str(eval_k)] = prec_at_k['TEST'][best_ep]
res_map['AVG-recall@'+str(eval_k)] = rec_at_k['TEST'][best_ep]
res_map['AVG-F1@'+str(eval_k)] = f1_at_k['TEST'][best_ep]
except:
res_map['precision@'+str(eval_k)] = last_test_ep['precision@'+str(eval_k)]
res_map['recall@'+str(eval_k)] = last_test_ep['recall@'+str(eval_k)]
res_map['F1@'+str(eval_k)] = last_test_ep['F1@'+str(eval_k)]
res_map['AVG-precision@'+str(eval_k)] = last_test_ep['AVG-precision@'+str(eval_k)]
res_map['AVG-recall@'+str(eval_k)] = last_test_ep['AVG-recall@'+str(eval_k)]
res_map['AVG-F1@'+str(eval_k)] = last_test_ep['AVG-F1@'+str(eval_k)]
for metric in metrics:
if len(metrics[metric]['TEST']) <= 0:
continue
try:
if metric == target_measure:
target_metric_best['TRAIN'] = metrics[metric]['TRAIN'][best_ep]
target_metric_best['VALID'] = metrics[metric]['VALID'][best_ep]
target_metric_best['TEST'] = metrics[metric]['TEST'][best_ep]
#print('Values at best Valid Epoch ({}) {}: TRAIN {} - VALID {} - TEST {}'.format(
# best_ep,
# metric,
# metrics[metric]['TRAIN'][best_ep],
# metrics[metric]['VALID'][best_ep],
# metrics[metric]['TEST'][best_ep]))
res_map[metric] = metrics[metric]['TEST'][best_ep]
except:
res_map[metric] = last_test_ep[metric]
#print ('WARNING: last epoch not finished, use the previous one.')
if use_latest_ep:
res_map['best_epoch'] = best_ep
else:
#print ('WARNING: last epoch not finished, use the previous one.')
res_map['best_epoch'] = last_test_ep['best_epoch']
str_results = ''
str_legend = ''
for k, v in res_map.items():
str_results+=str(v)+','
str_legend+=str(k)+','
for k, v in exp_params.items():
str_results+=str(v)+','
str_legend+=str(k)+','
log_file = filename.split('/')[-1].split('.log')[0]
res_map['log_file'] = log_file
grid_cell = log_file.split('grid_')[1]
res_map['grid_cell'] = grid_cell
str_results+='{},{}'.format(log_file, grid_cell)
str_legend+='log_file,grid_cell'
#print ('\n\nCSV-like output:')
#print (str_legend)
#print (str_results)
return res_map, exp_params, metrics, str_legend, str_results, target_metric_best, finished, line_nr, epoch
def parse_all_logs_in_folder(log_folder, return_continuous_encoder_logs=False):
cl_to_plot_id = 1 # Target class, typically the low frequent one
# We don't do edge classification here
#if 'reddit' in log_folder or ('bitcoin' in log_folder and 'edge' in log_folder):
# cl_to_plot_id = 0 # 0 for reddit dataset_name or bitcoin edge cls
simulate_early_stop = 0 # Early stop patience
eval_k = 1000 # to compute metrics @K (for instance precision@1000)
print_params = False # Print the parameters of each simulation
##### End parameters ######
#if 'elliptic' in log_folder or 'reddit' in log_folder or 'enron' in log_folder or ('bitcoin' in log_folder and 'edge' in log_folder):
# target_measure='f1' # map mrr auc f1 p r loss avg_p avg_r avg_f1
#else:
# target_measure='map' # map mrr auc f1 p r loss avg_p avg_r avg_f1
target_measure='map' # map mrr auc f1 p r loss avg_p avg_r avg_f1
# Hyper parameters to analyze
params = []
params.append('learning_rate')
params.append('num_hist_steps')
params.append('layer_1_feats')
params.append('lstm_l1_feats')
params.append('class_weights')
params.append('adj_mat_time_window')
params.append('cls_feats')
params.append('model')
params.append('val_sampling')
logs = {}
continuous_encoder_logs = {}
csv = []
csv_continuous_encoder = []
header = None
log_folderfiles = glob.glob(log_folder+'*')
printstr = ''
best_log_file = ''
best_log_file_continuous_encoder = ''
best_target_metric = 0
best_target_metric_continuous_encoder = 0
for log_file in log_folderfiles:
if log_file.endswith(".log") and not 'best_' in log_file:
# First check whether it is downstream learning or not, then check if it is only pre-training (encoder only for continuous)
if 'decoder' in log_file and 'learning_rate' in log_file:
logs[log_file] = {}
num_lines = sum(1 for line in open(log_file))
(res_map, exp_params, metrics, str_legend, str_results,
target_metric_best, finished, end_line, end_epoch) = parse_log(log_file, params, eval_k, cl_to_plot_id,
target_measure, print_params)
if end_line < num_lines:
if end_epoch == 0:
# Downstream
downstream_results = parse_log(log_file, params, eval_k, cl_to_plot_id,
target_measure, print_params)
logs[log_file]['downstream'] = {}
logs[log_file]['downstream']['res_map'] = downstream_results[0]
logs[log_file]['downstream']['metrics'] = downstream_results[2]
else:
# Using a frozen decoder, store continuous training before filling log file results with frozen decoder training
logs[log_file]['continuous_training'] = {}
logs[log_file]['continuous_training']['res_map'] = res_map
logs[log_file]['continuous_training']['exp_params'] = exp_params
logs[log_file]['continuous_training']['metrics'] = metrics
logs[log_file]['continuous_training']['str_legend'] = str_legend
logs[log_file]['continuous_training']['str_results'] = str_results
logs[log_file]['continuous_training']['target_metric_best'] = target_metric_best
logs[log_file]['continuous_training']['finished'] = finished
(res_map, _, metrics, str_legend, str_results,
target_metric_best, finished, end_line, end_epoch) = parse_log(log_file, params, eval_k, cl_to_plot_id,
target_measure, print_params, start_line=end_line)
# Comment out if old log without finish signal
#if not finished:
# printstr+='Log not finished. Skipping {}\n'.format(log_file)
# continue
logs[log_file]['res_map'] = res_map
logs[log_file]['exp_params'] = exp_params
logs[log_file]['metrics'] = metrics
logs[log_file]['str_legend'] = str_legend
logs[log_file]['str_results'] = str_results
logs[log_file]['target_metric_best'] = target_metric_best
logs[log_file]['finished'] = finished
try:
cell_best = target_metric_best['TEST']
except KeyError:
#print("No test epoch to use")
continue
if best_target_metric < cell_best:
best_target_metric = cell_best
best_log_file = log_file
if len(csv) <= 0:
header = str_legend
csv = [str_legend, str_results]
else:
if(str_legend == header):
csv.append(str_results)
else:
                        print('Warning: header did not match; did a file return badly?')
elif 'decoder' in log_file: #training encoder for continuous
continuous_encoder_logs[log_file] = {}
num_lines = sum(1 for line in open(log_file))
(res_map, exp_params, metrics, str_legend, str_results,
target_metric_best, finished, end_line, end_epoch) = parse_log(log_file, params, eval_k, cl_to_plot_id,
target_measure, print_params)
continuous_encoder_logs[log_file]['res_map'] = res_map
continuous_encoder_logs[log_file]['exp_params'] = exp_params
continuous_encoder_logs[log_file]['metrics'] = metrics
continuous_encoder_logs[log_file]['str_legend'] = str_legend
continuous_encoder_logs[log_file]['str_results'] = str_results
continuous_encoder_logs[log_file]['target_metric_best'] = target_metric_best
continuous_encoder_logs[log_file]['finished'] = finished
cell_best = target_metric_best['TEST']
if best_target_metric_continuous_encoder < cell_best:
best_target_metric_continuous_encoder = cell_best
best_log_file_continuous_encoder = log_file
if len(csv) <= 0:
header = str_legend
csv_continuous_encoder = [str_legend, str_results]
else:
if(str_legend == header):
csv_continuous_encoder.append(str_results)
else:
                        print('Warning: header did not match; did a file return badly?')
else: #Downstream learning
# Skipping downstream learning for now.
pass
#print(printstr)
if not return_continuous_encoder_logs:
return logs, csv, best_log_file
else:
return logs, csv, best_log_file, continuous_encoder_logs, csv_continuous_encoder, best_log_file_continuous_encoder
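# Usage sketch (assumption: 'logs/experiment_a/' is a hypothetical folder of *.log files; note the
# trailing separator, since parse_all_logs_in_folder() globs with log_folder+'*'). save_best_log()
# below reads a module-level `log_folder`, so set that global before calling it.
def example_summarize_log_folder(folder='logs/experiment_a/'):
    logs, csv_rows, best_log_file = parse_all_logs_in_folder(folder)
    write_csv(csv_rows, folder)
    print('Parsed {} decoder logs, best run: {}'.format(len(logs), best_log_file))
    return best_log_file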
def save_best_log(best_log_file):
# Add 'best_' in front of filename of the best log.
best_exist = False
existing_logs = []
log_folderfiles = glob.glob(log_folder+'*')
for log_file in log_folderfiles:
if 'best_' in log_file:
existing_logs.append(log_file)
best_exist = True
if best_exist and not prompt('Best log already exists, existing logs:\n{}\n write anyway?'
.format('\n'.join(existing_logs))):
print("Skipping saving best log")
pass
else:
print('Saving best log')
split_log_file = best_log_file.split('/')
split_log_file[-1] = "best_"+split_log_file[-1]
best_log_new_name = "/".join(split_log_file)
copy2(best_log_file, best_log_new_name)
def write_csv(csv, log_folder):
csv_file = log_folder+'results.csv'
log_folderfiles = glob.glob(log_folder+'*')
if csv_file in log_folderfiles and not prompt('CSV already calculated, overwrite?'):
print("Skipping writing to csv")
pass
else:
print('Writing to csv')
with open(csv_file, 'w') as f:
for line in csv:
f.write(line+'\n')
# Plot one metric
def plot_metric(metric, metric_name, plot_train):
df = pd.DataFrame.from_dict(metric)
if not plot_train:
df = df.drop(['TRAIN'], axis=1)
else:
# Rearrange columns so Train is at the end, thus they keep the same colours
cols = df.columns.to_list()
cols.append(cols.pop(0))
df = df[cols]
df['epochs'] = df.index
dfm = pd.melt(df, id_vars=['epochs'], var_name='set', value_name=metric_name)
fig = px.scatter(dfm, x='epochs', y=metric_name, color='set', trendline='ols')
return fig
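# Usage sketch (assumption, since parse_log's full output shape is defined elsewhere: each entry of
# `metrics` is keyed by metric name and holds per-set series such as 'TRAIN'/'VALID'/'TEST'):
# fig = plot_metric(logs[best_log_file]['metrics']['map'], 'map', plot_train=False)
# fig.show()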
# Plot many metrics
def plot_metrics(metrics, metric_names, plot_train_metrics=['loss']):
fig = make_subplots(rows=len(metric_names), cols=1, shared_xaxes=False)
for i, metric_name in enumerate(metric_names):
metric = metrics[metric_name]
df = | pd.DataFrame.from_dict(metric) | pandas.DataFrame.from_dict |
# This is a sample Python program that trains a BYOC TensorFlow model, and then performs inference.
# This implementation will work on your local computer.
#
# Prerequisites:
# 1. Install required Python packages:
# pip install boto3 sagemaker pandas scikit-learn
# pip install 'sagemaker[local]'
# 2. Docker Desktop has to be installed on your computer, and running.
# 3. Open terminal and run the following commands:
# docker build -t sagemaker-tensorflow2-batch-transform-local container/.
########################################################################################################################
import os
import pandas as pd
import sklearn.model_selection
from sagemaker.estimator import Estimator
from sklearn.datasets import *
from sklearn.preprocessing import StandardScaler
DUMMY_IAM_ROLE = 'arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001'
def download_training_and_eval_data():
if os.path.isfile('./data/train/x_train.csv') and \
os.path.isfile('./data/test/x_test.csv') and \
os.path.isfile('./data/train/y_train.csv') and \
os.path.isfile('./data/test/y_test.csv'):
print('Training and evaluation datasets exist. Skipping Download')
else:
print('Downloading training and evaluation dataset')
data_dir = os.path.join(os.getcwd(), 'data')
os.makedirs(data_dir, exist_ok=True)
train_dir = os.path.join(os.getcwd(), 'data/train')
os.makedirs(train_dir, exist_ok=True)
test_dir = os.path.join(os.getcwd(), 'data/test')
os.makedirs(test_dir, exist_ok=True)
input_dir = os.path.join(os.getcwd(), 'data/input')
os.makedirs(input_dir, exist_ok=True)
output_dir = os.path.join(os.getcwd(), 'data/output')
os.makedirs(output_dir, exist_ok=True)
data_set = fetch_california_housing()
X = pd.DataFrame(data_set.data, columns=data_set.feature_names)
Y = pd.DataFrame(data_set.target)
# We partition the dataset into 2/3 training and 1/3 test set.
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.33)
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
pd.DataFrame(x_train).to_csv(os.path.join(train_dir, 'x_train.csv'), header=None, index=False)
pd.DataFrame(x_test).to_csv(os.path.join(test_dir, 'x_test.csv'),header=None, index=False)
| pd.DataFrame(x_test) | pandas.DataFrame |
"""Functions for preprocessing cord-19 dataset."""
# -*- coding: utf-8 -*-
import json
import re
import tarfile
from datetime import datetime
from typing import List
from zipfile import ZipFile
import pandas as pd
# from pandas.io.json import json_normalize
def construct_regex_match_pattern(search_terms_file_path: str, search_type: str = 'fuzzy'):
"""
Construct regex search pattern for the specified terms.
    :param search_terms_file_path: file path for list of search terms
:param search_type: "exact" vs "flank_white_space" vs "fuzzy" pattern
:return: Regex search pattern
"""
with open(search_terms_file_path) as f:
search_terms = f.read().splitlines()
if search_type == 'exact':
exact_pattern = '|'.join([i.lower() for i in search_terms])
return exact_pattern
elif search_type == 'flank_white_space':
exact_pattern = '\W' + '\W|\W'.join([i.lower() for i in search_terms]) + '\W' # noqa: W605
return exact_pattern
else:
# TODO: fix flake8 error code FS001
fuzzy_terms = ['.*%s.*' % i.lower() for i in search_terms] # noqa: FS001
fuzzy_pattern = '|'.join(fuzzy_terms)
return fuzzy_pattern
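# Illustrative sketch (assumption: 'covid_terms.txt' is a hypothetical file listing one term per
# line, e.g. "covid-19" and "sars-cov-2"); the three search types then yield:
#   'exact'             -> 'covid-19|sars-cov-2'
#   'flank_white_space' -> '\Wcovid-19\W|\Wsars-cov-2\W'
#   'fuzzy'             -> '.*covid-19.*|.*sars-cov-2.*'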
def filter_metadata_for_covid19(metadata_path: str, virus_lex_path: str, pub_date_cutoff: str = None):
"""
Filter metadata to publications containing a COVID-19 synonym in title or abstract and published after cut-off date.
:param metadata_path: path to CORD-19 metadata.csv file
:param virus_lex_path: path to COVID-19 lexicon
:param pub_date_cutoff: cut-off for publication date in the format 'yyyy-mm-dd'
:return: Dataframe of metadata for filtered publications
"""
if pub_date_cutoff is not None:
pub_date_cutoff = datetime.strptime(pub_date_cutoff, "%Y-%m-%d")
metadata_df = pd.read_csv(metadata_path)
# Concatenate title and abstract text into a single, lower-cased column
metadata_df = metadata_df.fillna('')
metadata_df.loc[:, 'title_abstract'] = metadata_df.loc[:, 'title'].str.lower() + ' '\
+ metadata_df.loc[:, 'abstract'].str.lower()
metadata_df.loc[:, 'title_abstract'] = metadata_df.loc[:, 'title_abstract'].fillna('')
# Load file with COVID-19 lexicon (1 per line) and generate a search pattern
covid_19_term_pattern = construct_regex_match_pattern(virus_lex_path, 'exact')
covid19_df = metadata_df.loc[metadata_df.title_abstract.str.contains(covid_19_term_pattern)]\
.copy().reset_index(drop=True)
if pub_date_cutoff is not None:
# Format & convert publish_time column to datetime type of uniform format
covid19_df['publish_time'] = pd.to_datetime(covid19_df['publish_time'])
covid19_df['publish_time'] = covid19_df['publish_time'].dt.strftime('%Y-%m-%d')
covid19_df['publish_time'] = pd.to_datetime(covid19_df['publish_time'])
covid19_df = covid19_df.loc[covid19_df['publish_time'] > pub_date_cutoff]\
.copy().reset_index(drop=True)
return covid19_df
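# Usage sketch (file paths are assumptions, not from this repo):
# covid19_df = filter_metadata_for_covid19('cord19/metadata.csv', 'lexicons/covid19_synonyms.txt',
#                                          pub_date_cutoff='2019-12-01')
# print(len(covid19_df), 'publications mention a COVID-19 synonym in the title/abstract')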
def extract_json_to_dataframe(covid19_metadata: pd.DataFrame,
json_text_file_dir: str,
json_temp_path: str,
pdf_filenames: List[str],
pmc_filenames: List[str]):
"""
Extract publications text from json files for a specified set of filenames and store in a dataframe.
:param covid19_metadata: pandas dataframe, output of filter_metadata_for_covid19()
:param json_text_file_dir: path to zip directory containing json files
:param json_temp_path: path for temporary file storage
:param pdf_filenames: list of pdf file names to extract
:param pmc_filenames: list of pmc file names to extract
:return: Dataframe of publication texts for the specified filenames
"""
    # Empty dictionary to store the extracted section text
    covid19_dict = {}
    # Replace common mojibake sequences (UTF-8 text mis-decoded as cp1252) with the intended characters
replace_dict = {'“': '“',
'â€': '”',
'’': '’',
'‘': '‘',
'—': '–',
'–': '—',
'•': '-',
'…': '…'}
if '.zip' in json_text_file_dir:
zipobj = ZipFile(json_text_file_dir, 'r')
list_of_filenames = zipobj.namelist()
elif 'tar.gz' in json_text_file_dir:
tarf = tarfile.open(json_text_file_dir, 'r:gz')
list_of_filenames = tarf.getnames()
else:
        raise Exception("Incorrect file extension. Must be '.zip' or '.tar.gz'")
# print('Number of files to iterate over:',len(list_of_filenames))
k = 0
# TODO: Parallelize the code below
for iter_num, filename in enumerate(list_of_filenames):
# Check filename ends with json and file exists in filtered list of cord papers
if (filename in pdf_filenames) or (filename in pmc_filenames):
if '.zip' in json_text_file_dir:
zipobj.extract(filename, json_temp_path)
elif 'tar.gz' in json_text_file_dir:
tarf.extract(tarf.getmembers()[iter_num], json_temp_path)
with open(json_temp_path + filename, 'r', encoding='utf8') as f:
# Read each line in the file separately, remove tabs, spaces and newlines
# and concatenate all lines together for further parsing
json_str = "".join([" ".join(line.split()) for line in f])
# Parse the json string into the json dictionary format
                json_dict = json.loads(json_str)
# Convert the json dictionary object to a pandas dataframe
paper_df = | pd.json_normalize(json_dict) | pandas.json_normalize |
"""
Testing that functions from rpy work as expected
"""
import pandas as pd
import numpy as np
import unittest
import nose
import pandas.util.testing as tm
try:
import pandas.rpy.common as com
from rpy2.robjects import r
import rpy2.robjects as robj
except ImportError:
raise nose.SkipTest('R not installed')
class TestCommon(unittest.TestCase):
def test_convert_list(self):
obj = r('list(a=1, b=2, c=3)')
converted = com.convert_robj(obj)
expected = {'a': [1], 'b': [2], 'c': [3]}
tm.assert_dict_equal(converted, expected)
def test_convert_nested_list(self):
obj = r('list(a=list(foo=1, bar=2))')
converted = com.convert_robj(obj)
expected = {'a': {'foo': [1], 'bar': [2]}}
tm.assert_dict_equal(converted, expected)
def test_convert_frame(self):
# built-in dataset
df = r['faithful']
converted = com.convert_robj(df)
assert np.array_equal(converted.columns, ['eruptions', 'waiting'])
assert np.array_equal(converted.index, np.arange(1, 273))
def _test_matrix(self):
r('mat <- matrix(rnorm(9), ncol=3)')
r('colnames(mat) <- c("one", "two", "three")')
r('rownames(mat) <- c("a", "b", "c")')
return r['mat']
def test_convert_matrix(self):
mat = self._test_matrix()
converted = com.convert_robj(mat)
assert np.array_equal(converted.index, ['a', 'b', 'c'])
assert np.array_equal(converted.columns, ['one', 'two', 'three'])
def test_convert_r_dataframe(self):
is_na = robj.baseenv.get("is.na")
seriesd = tm.getSeriesData()
frame = pd.DataFrame(seriesd, columns=['D', 'C', 'B', 'A'])
# Null data
frame["E"] = [np.nan for item in frame["A"]]
# Some mixed type data
frame["F"] = ["text" if item %
2 == 0 else np.nan for item in range(30)]
r_dataframe = com.convert_to_r_dataframe(frame)
assert np.array_equal(
com.convert_robj(r_dataframe.rownames), frame.index)
assert np.array_equal(
com.convert_robj(r_dataframe.colnames), frame.columns)
assert all(is_na(item) for item in r_dataframe.rx2("E"))
for column in frame[["A", "B", "C", "D"]]:
coldata = r_dataframe.rx2(column)
original_data = frame[column]
assert np.array_equal(com.convert_robj(coldata), original_data)
for column in frame[["D", "E"]]:
for original, converted in zip(frame[column],
r_dataframe.rx2(column)):
if pd.isnull(original):
assert is_na(converted)
else:
assert original == converted
def test_convert_r_matrix(self):
is_na = robj.baseenv.get("is.na")
seriesd = tm.getSeriesData()
frame = pd.DataFrame(seriesd, columns=['D', 'C', 'B', 'A'])
# Null data
frame["E"] = [np.nan for item in frame["A"]]
r_dataframe = com.convert_to_r_matrix(frame)
assert np.array_equal(
| com.convert_robj(r_dataframe.rownames) | pandas.rpy.common.convert_robj |
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer as skImputer
from ..utils.stat_utils import which_columns_are_binary
from causallib.estimation import Matching
# TODO: Entire module might be redundant, now that scikit-learn supports missing values
# in its preprocessing: https://scikit-learn.org/stable/whats_new/v0.20.html#highlights
# The only support now needed is:
# 1) Transforming from numpy-array to pandas DataFrame in a pipeline, before specifying a causal model.
# 2) Possible generic support for causallib's additional `a` parameter, along with `X` and `y`.
class StandardScaler(BaseEstimator, TransformerMixin):
"""
Standardize continuous features by removing the mean and scaling to unit variance while allowing nans.
X = (X - X.mean()) / X.std()
"""
def __init__(self, with_mean=True, with_std=True, ignore_nans=True):
"""
Args:
with_mean (bool): Whether to center the data before scaling.
with_std (bool): Whether to scale the data to unit variance.
ignore_nans (bool): Whether to ignore NaNs during calculation.
"""
self.with_mean = with_mean
self.with_std = with_std
self.ignore_nans = ignore_nans
def fit(self, X, y=None):
"""
Compute the mean and std to be used for later scaling.
Args:
X (pd.DataFrame): The data used to compute the mean and standard deviation used for later scaling along the
features axis (axis=0).
y: Passthrough for ``Pipeline`` compatibility.
Returns:
StandardScaler: A fitted standard-scaler
"""
continuous_features = self._get_relevant_features(X)
self._feature_mask_ = continuous_features
if self.with_mean:
means = X.loc[:, self._feature_mask_].mean(skipna=self.ignore_nans)
else:
means = pd.Series(0, index=continuous_features)
self.mean_ = means
if self.with_std:
scales = X.loc[:, self._feature_mask_].std(skipna=self.ignore_nans)
else:
scales = | pd.Series(1, index=continuous_features) | pandas.Series |
import glob
import os
import pandas as pd
from fds.datax.utils.ipyexit import IpyExit
class FdsDataStoreLedger:
def __init__(self, dir_path):
self.dir_path = dir_path
def __load_cache_details__(self):
"""
This function will check for available FDS Caches within the
existing working directory. The full list will be stored in a
"fds_cache_details.txt" file within an fdsDataStore directory within
the current working directory.
"""
cache = os.path.join(self.dir_path, "fdsDataStore", "fds_cache_details.txt")
if len(glob.glob(cache, recursive=False)) == 0:
return None
else:
caches = pd.read_csv(
cache,
sep="|",
index_col="Cache Name",
parse_dates=["Start Date", "End Date", "Last Update Date"],
)
return caches
def avail_caches(self):
"""
Determine if caches exist in the established working directory.
"""
df = self.__load_cache_details__()
        if df is not None:
return df
else:
print(
"No existing Data Cache Universes exist in this Data Store. Use the fds.datax.Universe.create() function to "
"generate a new data cache universe."
)
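    # Usage sketch (assumption: the current working directory is the data-store root):
    #   ledger = FdsDataStoreLedger('.')
    #   caches = ledger.avail_caches()  # DataFrame of existing caches, or a printed hint if none exist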
def cache_ledger(
self, cache_name, source, mssql_dsn, currency, etf_ticker, start_date, end_date
):
"""
This function is used to:
-Determine if the fdsDataStore directory exists in the current working dir.
-Check if a fds_cache_details.txt file exists.
-Create or update the fds_cache_details with latest information.
"""
cols = [
"Cache Name",
"Source",
"Cache Location",
"MSSQL DSN",
"ETF Ticker",
"Currency",
"Start Date",
"End Date",
"Last Update Date",
]
cache = os.path.join(self.dir_path, "fdsDataStore", "fds_cache_details.txt")
if len(glob.glob(cache, recursive=False)) > 0:
print("Cache Detail File Found.")
# cache file exists create, read in cache
df = pd.read_csv(
cache,
sep="|",
index_col="Cache Name",
parse_dates=["Start Date", "End Date", "Last Update Date"],
)
else:
# no ledger, create new dataframe:
            print("Cache Detail File Not Found, Creating File.")
df = | pd.DataFrame(columns=cols) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @Time : 2022/3/8 9:00 AM
# @Author : heisenberg
# @File : fuzzymatching.py
# @Project : sufe-cs-conf-ddl
# @Target : matching CCF info with Tenure Track info from short titles, like a v-lookup operation.
# Packages that need to be installed in advance
# pip install fuzzywuzzy
# pip install python-Levenshtein
import pandas as pd
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def fuzzy_merge(df_1, df_2, key1, key2, threshold=90, limit=2):
    """Fuzzy left-join df_1 to df_2 by matching df_1[key1] against df_2[key2].

    For every value in df_1[key1], the closest candidates from df_2[key2] are scored with
    fuzzywuzzy; matches scoring at least `threshold` are kept (at most `limit` candidates).
    """
    s = df_2[key2].tolist()
    # Best `limit` candidate matches (value, score) for each row of df_1[key1]
    m = df_1[key1].apply(lambda x: process.extract(x, s, limit=limit))
    df_1['matches'] = m
    # Keep only candidates whose similarity score clears the threshold
    m2 = df_1['matches'].apply(lambda x: ', '.join([i[0] for i in x if i[1] >= threshold]))
    df_1['matches'] = m2
    # Join on the matched string
    df_1['merge_key'] = df_1['matches']
    df_2['merge_key'] = df_2[key2]
    df = pd.merge(df_1, df_2, how='left', on='merge_key')
    return df
df1 = pd.DataFrame([['Apple','A'],['Banana','B'],['Orange','C'],['Strawberry','D'],['Mango','G']], columns=['Fruits','AA'])
df2 = pd.DataFrame([['Aple','a'],['Bannanna','b'],['Orag','c'],['Strawb','d']], columns=['Fruits','aa'])
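# Illustrative sketch (not in the original script): with a lower threshold the misspelled toy frame
# should pair with its closest correct names, e.g. 'Aple' -> 'Apple' (exact scores depend on
# fuzzywuzzy's scorer, so the threshold below is an assumption).
# matched_toy = fuzzy_merge(df2, df1, 'Fruits', 'Fruits', threshold=80, limit=1)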
all_CCF_path = r'C:\Users\Administrator.WIN-T57L97EEQAK\Desktop\博士毕业\中国计算机学会CCF推荐会议.xlsx'
sime_tenure_path = r'F:\VSCodeProjects\SIME-conference-DDL\.readme_assets\SIME-Tenure-CS-Conference.xlsx'
all_CCF_pd = pd.read_excel(all_CCF_path)
sime_tenure_pd = | pd.read_excel(sime_tenure_path) | pandas.read_excel |
#%%
import datetime
import time
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import merge_axes, soft_axis_off
from pkg.data import load_network_palette, load_unmatched
from pkg.io import FIG_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.plot import SmartSVG, rainbowarrow, set_theme
from pkg.stats import erdos_renyi_test, stochastic_block_test
from pkg.utils import sample_toy_networks
from scipy.interpolate import interp1d
from svgutils.compose import Figure, Panel, Text
from tqdm import tqdm
from pkg.utils import remove_group
DISPLAY_FIGS = True
FILENAME = "thresholding_tests"
FIG_PATH = FIG_PATH / FILENAME
def glue(name, var, **kwargs):
default_glue(name, var, FILENAME, **kwargs)
def gluefig(name, fig, **kwargs):
savefig(name, foldername=FILENAME, **kwargs)
glue(name, fig, figure=True)
if not DISPLAY_FIGS:
plt.close()
t0 = time.time()
set_theme()
#%%
network_palette, NETWORK_KEY = load_network_palette()
left_adj, left_nodes = load_unmatched("left", weights=True)
right_adj, right_nodes = load_unmatched("right", weights=True)
neutral_color = sns.color_palette("Set2")[2]
GROUP_KEY = "simple_group"
left_labels = left_nodes[GROUP_KEY].values
right_labels = right_nodes[GROUP_KEY].values
#%%
fig, axs = plt.subplots(2, 1, figsize=(4, 5))
g = nx.DiGraph()
g.add_edge(0, 1, weight=4)
pos = {0: (0.1, 0.65), 1: (0.5, 0.65)}
ax = axs[0]
soft_axis_off(ax)
nx.draw_networkx(
g,
pos,
ax=ax,
arrowstyle="-",
connectionstyle="arc3,rad=-0.5",
width=3,
node_size=500,
with_labels=False,
)
nx.draw_networkx(
g,
pos,
ax=ax,
arrowstyle="-",
connectionstyle="arc3,rad=0.5",
width=3,
node_size=500,
with_labels=False,
)
ax.plot([0.45], [0.73], "o", color="black")
ax.set(xlim=(0, 1), ylim=(0, 1))
ax.set_ylabel("Synapse\ncount", rotation=0, ha="right")
ax.text(0.65, 0.65, "Weight = 2", fontsize="medium")
ax = axs[1]
soft_axis_off(ax)
nx.draw_networkx(
g,
pos,
ax=ax,
arrowstyle="-|>",
connectionstyle="arc3,rad=-0.5",
width=3,
node_size=500,
with_labels=False,
)
ax.set(xlim=(0, 1), ylim=(0, 1))
ax.text(0.65, 0.65, "Weight = 2 / 5", fontsize="large")
def draw_input(xytext):
ax.annotate(
"",
(0.5, 0.65),
xytext=xytext,
textcoords="offset points",
arrowprops=dict(
arrowstyle="-|>",
connectionstyle="arc3",
facecolor="grey",
linewidth=3,
shrinkB=10,
edgecolor="grey"
# width=0.5,
# mutation_scale=0.5,
),
)
draw_input((-25, -25))
draw_input((0, -35))
draw_input((-35, 0))
draw_input((0, 35))
ax.set_ylabel("Input\nproportion", rotation=0, ha="right")
fig.set_facecolor("w")
#%%
fig, axs = plt.subplots(2, 1, figsize=(4, 5), gridspec_kw=dict(hspace=0))
from matplotlib.patches import FancyArrowPatch
from matplotlib.patches import Circle
set_theme(font_scale=1)
source_loc = (0.25, 0.5)
target_loc = (0.75, 0.5)
radius = 0.05
dim_color = "black"
dark_color = "black"
def draw_synapse_end(rad_factor, color="black"):
rad = np.pi * rad_factor
x = np.cos(rad)
y = np.sin(rad)
scale_factor = 1.6
x *= radius * scale_factor
y *= radius * scale_factor
x += target_loc[0]
y += target_loc[1]
c = Circle((x, y), radius=0.0125, color=color)
ax.add_patch(c)
def draw_synapse(source_loc, connection_rad=0, end_rad=0, color="black"):
fa = FancyArrowPatch(
posA=source_loc,
posB=target_loc,
connectionstyle=f"arc3,rad={connection_rad}",
shrinkB=30,
color=color,
)
ax.add_patch(fa)
draw_synapse_end(end_rad, color=color)
def draw_neurons():
source_circle = Circle(
(source_loc),
radius=radius,
facecolor=neutral_color,
edgecolor="black",
linewidth=2,
zorder=10,
)
ax.add_patch(source_circle)
ax.text(*source_loc, r"$i$", zorder=11, va="center", ha="center")
target_circle = Circle(
(target_loc),
radius=radius,
facecolor=neutral_color,
edgecolor="black",
linewidth=2,
zorder=10,
)
ax.add_patch(target_circle)
ax.text(*target_loc, r"$j$", zorder=11, va="center", ha="center")
def set_lims(ax):
ax.set_xlim(0.19, 0.81)
ax.set_ylim(0.3, 0.7)
ax = axs[0]
ax.text(0.93, 0.5, 2, fontsize="large", va="center", ha="center")
soft_axis_off(ax)
ax.set_ylabel("Synapse\ncount", rotation=0, ha="right", va="center", labelpad=20)
draw_neurons()
draw_synapse(source_loc, connection_rad=-0.5, end_rad=0.75)
draw_synapse(source_loc, connection_rad=0.5, end_rad=-0.75)
set_lims(ax)
ax.annotate(
r"Synapse from $i$ to $j$",
(0.5, 0.63),
xytext=(40, 25),
textcoords="offset points",
ha="center",
arrowprops=dict(arrowstyle="-|>", facecolor="black", relpos=(0.25, 0)),
fontsize="small",
)
# ax.text(0.65, 0.65, "Weight = 2", fontsize="medium")
ax = axs[1]
ax.text(0.93, 0.5, "2 / 5", fontsize="large", va="center", ha="center")
soft_axis_off(ax)
ax.set_ylabel("Input\nproportion", rotation=0, ha="right", va="center", labelpad=20)
# ax.text(0.65, 0.65, "Weight = 2 / 5", fontsize="medium")
draw_neurons()
draw_synapse(source_loc, connection_rad=-0.5, end_rad=0.75)
draw_synapse(source_loc, connection_rad=0.5, end_rad=-0.75)
dist = 0.15
draw_synapse(
(target_loc[0], target_loc[1] + dist),
connection_rad=0,
end_rad=0.5,
color=dim_color,
)
draw_synapse(
(target_loc[0] - dist, target_loc[1]),
connection_rad=0,
end_rad=1,
color=dim_color,
)
draw_synapse(
(target_loc[0], target_loc[1] - dist),
connection_rad=0,
end_rad=-0.5,
color=dim_color,
)
set_lims(ax)
ax.annotate(
r"Synapse from not $i$ to $j$",
(0.75, 0.4),
xytext=(-10, -50),
textcoords="offset points",
ha="right",
arrowprops=dict(arrowstyle="-|>", facecolor="black", relpos=(0.75, 1)),
fontsize="small",
)
fig.set_facecolor("w")
fig.text(0.07, 0.89, "Weight\n type", fontsize="large", ha="right")
fig.text(0.97, 0.89, "Weight\n" + r"$i \rightarrow$ j", fontsize="large")
# fig.text(0.1, 0.9, "Source\nneuron")
# fig.text(0.75, 0.9, "Target\nneuron")
import matplotlib as mpl
border_color = "lightgrey"
line1 = mpl.lines.Line2D(
(-0.25, 1.2),
(0.5, 0.5),
transform=fig.transFigure,
color=border_color,
linewidth=1.5,
)
line2 = mpl.lines.Line2D(
(0.95, 0.95),
(0.15, 0.85),
transform=fig.transFigure,
color=border_color,
linewidth=1.5,
)
line3 = mpl.lines.Line2D(
(0.1, 0.1),
(0.15, 0.85),
transform=fig.transFigure,
color=border_color,
linewidth=1.5,
)
fig.lines = (line1, line2, line3)
gluefig("weight_notions", fig)
# %%
# %%
#%%
rng = np.random.default_rng(8888)
A1, A2, node_data = sample_toy_networks()
node_data["labels"] = np.ones(len(node_data), dtype=int)
palette = {1: sns.color_palette("Set2")[2]}
g1 = nx.from_numpy_array(A1)
g2 = nx.from_numpy_array(A2)
pos1 = nx.kamada_kawai_layout(g1)
pos2 = nx.kamada_kawai_layout(g2)
def weight_adjacency(A, scale=6):
A = A.copy()
sources, targets = np.nonzero(A)
for source, target in zip(sources, targets):
# weight = rng.poisson(scale)
weight = rng.uniform(1, 10)
A[source, target] = weight
return A
def layoutplot(
g,
pos,
nodes,
ax=None,
figsize=(10, 10),
weight_scale=1,
node_alpha=1,
node_size=300,
palette=None,
edge_alpha=0.4,
edge_color="black",
):
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
edgelist = g.edges()
weights = np.array([g[u][v]["weight"] for u, v in edgelist])
weights *= weight_scale
nx.draw_networkx_nodes(
g,
pos,
nodelist=nodes.index,
node_color=nodes["labels"].map(palette),
edgecolors="black",
alpha=node_alpha,
node_size=node_size,
ax=ax,
)
nx.draw_networkx_edges(
g,
pos,
edgelist=edgelist,
nodelist=nodes.index,
width=weights,
edge_vmin=-3,
edge_vmax=9,
edge_color=weights,
alpha=edge_alpha,
ax=ax,
node_size=node_size,
)
soft_axis_off(ax)
return ax
set_theme(font_scale=1.75)
fig, axs = plt.subplots(
4,
3,
figsize=(12, 10),
constrained_layout=True,
gridspec_kw=dict(height_ratios=[0.5, 1, 0.25, 1], hspace=0, wspace=0),
)
A1 = weight_adjacency(A1)
A2 = weight_adjacency(A2)
kwargs = dict(
palette=palette, edge_alpha=1, edge_color=(0.65, 0.65, 0.65), weight_scale=0.75
)
thresholds = [1, 4, 7]
for i in range(3):
A1[A1 < thresholds[i]] = 0
A2[A2 < thresholds[i]] = 0
g1 = nx.from_numpy_array(A1)
g2 = nx.from_numpy_array(A2)
ax = axs[1, i]
layoutplot(g1, pos1, node_data, ax=ax, **kwargs)
ax = axs[3, i]
layoutplot(g2, pos2, node_data, ax=ax, **kwargs)
ax = merge_axes(fig, axs, rows=0)
rainbowarrow(ax, start=(0.1, 0.5), end=(0.9, 0.5), cmap="Greys", n=1000, lw=30)
ax.set(ylim=(0.4, 0.8), xlim=(0, 1))
ax.set_title("Increasing edge weight threshold", fontsize="large", y=0.5)
ax.axis("off")
def draw_comparison(ax):
ax.text(
0.48, 0.35, r"$\overset{?}{=}$", fontsize="xx-large", ha="center", va="center"
)
# ax.plot([0.5, 0.5], [-0.5, 1.25], clip_on=False, linewidth=2, color='darkgrey')
ax.set(ylim=(0, 1), xlim=(0, 1))
ax.axis("off")
ax = axs[2, 0]
draw_comparison(ax)
ax = axs[2, 1]
draw_comparison(ax)
ax = axs[2, 2]
draw_comparison(ax)
ax.annotate(
"Rerun all\n tests",
(0.6, 0.6),
xytext=(45, 0),
textcoords="offset points",
arrowprops=dict(arrowstyle="-|>", facecolor="black"),
fontsize="medium",
va="center",
)
axs[1, 0].set_ylabel(
"Left",
color=network_palette["Left"],
size="large",
rotation=0,
ha="right",
labelpad=10,
)
axs[3, 0].set_ylabel(
"Right",
color=network_palette["Right"],
size="large",
rotation=0,
ha="right",
labelpad=10,
)
fig.set_facecolor("w")
gluefig("thresholding_methods", fig)
# %%
def construct_weight_data(left_adj, right_adj):
indices = np.nonzero(left_adj)
left_weights = left_adj[indices]
indices = np.nonzero(right_adj)
right_weights = right_adj[indices]
labels = np.concatenate(
(len(left_weights) * ["Left"], len(right_weights) * ["Right"])
)
weights = np.concatenate((left_weights, right_weights))
weight_data = | pd.Series(data=weights, name="weights") | pandas.Series |
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.ix[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
# tst that cacher updates
original = Series([1, 2, np.nan], name='A')
expected = Series([1, 2], dtype=original.dtype, name='A')
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
self.assertRaises(ValueError, self.frame.dropna, how='foo')
self.assertRaises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
self.assertRaises(KeyError, self.frame.dropna, subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_fillna(self):
self.tsframe.ix[:5, 'A'] = nan
self.tsframe.ix[-5:, 'A'] = nan
zero_filled = self.tsframe.fillna(0)
self.assertTrue((zero_filled.ix[:5, 'A'] == 0).all())
padded = self.tsframe.fillna(method='pad')
self.assertTrue(np.isnan(padded.ix[:5, 'A']).all())
self.assertTrue((padded.ix[-5:, 'A'] == padded.ix[-5, 'A']).all())
# mixed type
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
self.assertRaises(ValueError, self.tsframe.fillna)
self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
mf.ix[-10:, 'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
df.x.fillna(method=m, inplace=1)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a', 'a', np.nan, 'a'], [
'b', 'b', np.nan, 'b'], ['c', 'c', np.nan, 'c']])
result = df.fillna({2: 'foo'})
expected = DataFrame([['a', 'a', 'foo', 'a'],
['b', 'b', 'foo', 'b'],
['c', 'c', 'foo', 'c']])
assert_frame_equal(result, expected)
df.fillna({2: 'foo'}, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date': [pd.NaT, Timestamp("2014-1-1")],
'Date2': [Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(df.ix[0, 'Date2'])
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.get_dtype_counts().sort_values()
expected = Series({'object': 5})
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = result.get_dtype_counts().sort_values()
expected = Series({'int64': 5})
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3), columns=['A', 'B'], dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan', index=lrange(3), columns=['A', 'B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
for v in ['', 1, np.nan, 1.0]:
expected = df.replace(np.nan, v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
# GH 7095
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), pd.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), '?'],
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=pd.date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
def test_ffill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.ffill(),
self.tsframe.fillna(method='ffill'))
def test_bfill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.bfill(),
self.tsframe.fillna(method='bfill'))
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
df = DataFrame(np.random.randn(10, 4))
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(value=0)
self.assertIsNot(expected, df)
df.fillna(value=0, inplace=True)
assert_frame_equal(df, expected)
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
self.assertIsNot(expected, df)
df.fillna(method='ffill', inplace=True)
assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]})
result = df.fillna({'a': 0, 'b': 5})
expected = df.copy()
expected['a'] = expected['a'].fillna(0)
expected['b'] = expected['b'].fillna(5)
assert_frame_equal(result, expected)
# it works
result = df.fillna({'a': 0, 'b': 5, 'd': 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
assert_frame_equal(result, expected)
# disable this for now
with assertRaisesRegexp(NotImplementedError, 'column by column'):
df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar'] * 5},
index=list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
assert_frame_equal(result, expected)
def test_fillna_columns(self):
df = DataFrame(np.random.randn(10, 10))
df.values[:, ::2] = np.nan
result = df.fillna(method='ffill', axis=1)
expected = df.T.fillna(method='pad').T
assert_frame_equal(result, expected)
df.insert(6, 'foo', 5)
result = df.fillna(method='ffill', axis=1)
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
with assertRaisesRegexp(ValueError, 'ffil'):
self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
self.assertRaises(TypeError, self.frame.fillna, [1, 2])
# tuple
self.assertRaises(TypeError, self.frame.fillna, (1, 2))
# frame with series
self.assertRaises(ValueError, self.frame.iloc[:, 0].fillna,
self.frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.rand(20, 5)
df = DataFrame(index=lrange(20), columns=cols, data=data)
filled = df.fillna(method='ffill')
self.assertEqual(df.columns.tolist(), filled.columns.tolist())
def test_fill_corner(self):
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
filled = self.mixed_frame.fillna(value=0)
self.assertTrue((filled.ix[5:20, 'foo'] == 0).all())
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = DataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
assert_frame_equal(res, exp)
class TestDataFrameInterpolate(tm.TestCase, TestData):
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
from sklearn.model_selection import StratifiedKFold
from evalml import AutoMLSearch
from evalml.automl.callbacks import raise_error_callback
from evalml.automl.pipeline_search_plots import SearchIterationPlot
from evalml.exceptions import PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
FraudCost,
Precision,
PrecisionMicro,
Recall,
get_core_objectives,
get_objective,
)
from evalml.pipelines import (
BinaryClassificationPipeline,
MulticlassClassificationPipeline,
PipelineBase,
TimeSeriesBinaryClassificationPipeline,
TimeSeriesMulticlassClassificationPipeline,
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import TimeSeriesSplit, split_data
from evalml.problem_types import ProblemTypes
def test_init(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X, y_train=y, problem_type="binary", max_iterations=1, n_jobs=1
)
automl.search()
assert automl.n_jobs == 1
assert isinstance(automl.rankings, pd.DataFrame)
assert isinstance(automl.best_pipeline, PipelineBase)
automl.best_pipeline.predict(X)
# test with dataframes
automl = AutoMLSearch(
pd.DataFrame(X), pd.Series(y), problem_type="binary", max_iterations=1, n_jobs=1
)
automl.search()
assert isinstance(automl.rankings, pd.DataFrame)
assert isinstance(automl.full_rankings, pd.DataFrame)
assert isinstance(automl.best_pipeline, PipelineBase)
assert isinstance(automl.get_pipeline(0), PipelineBase)
assert automl.objective.name == "Log Loss Binary"
automl.best_pipeline.predict(X)
def test_init_objective(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective=Precision(),
max_iterations=1,
)
assert isinstance(automl.objective, Precision)
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="Precision",
max_iterations=1,
)
assert isinstance(automl.objective, Precision)
def test_get_pipeline_none(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary")
with pytest.raises(PipelineNotFoundError, match="Pipeline not found"):
automl.describe_pipeline(0)
def test_data_splitter(X_y_binary):
X, y = X_y_binary
cv_folds = 5
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
data_splitter=StratifiedKFold(n_splits=cv_folds),
max_iterations=1,
n_jobs=1,
)
automl.search()
assert isinstance(automl.rankings, pd.DataFrame)
assert len(automl.results["pipeline_results"][0]["cv_data"]) == cv_folds
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
data_splitter=TimeSeriesSplit(n_splits=cv_folds),
max_iterations=1,
n_jobs=1,
)
automl.search()
assert isinstance(automl.rankings, pd.DataFrame)
assert len(automl.results["pipeline_results"][0]["cv_data"]) == cv_folds
def test_max_iterations(AutoMLTestEnv, X_y_binary):
X, y = X_y_binary
max_iterations = 5
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
max_iterations=max_iterations,
n_jobs=1,
)
env = AutoMLTestEnv("binary")
with env.test_context(score_return_value={automl.objective.name: 0.2}):
automl.search()
assert len(automl.full_rankings) == max_iterations
def test_recall_error(X_y_binary):
X, y = X_y_binary
# Recall is a valid objective but it's not allowed in AutoML so a ValueError is expected
error_msg = "recall is not allowed in AutoML!"
with pytest.raises(ValueError, match=error_msg):
AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="recall",
max_iterations=1,
)
def test_recall_object(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective=Recall(),
max_iterations=1,
n_jobs=1,
)
automl.search()
assert len(automl.full_rankings) > 0
assert automl.objective.name == "Recall"
def test_binary_auto(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="Log Loss Binary",
max_iterations=3,
n_jobs=1,
)
automl.search()
best_pipeline = automl.best_pipeline
assert best_pipeline._is_fitted
y_pred = best_pipeline.predict(X)
assert len(np.unique(y_pred)) == 2
def test_multi_auto(X_y_multi):
multiclass_objectives = get_core_objectives("multiclass")
X, y = X_y_multi
objective = PrecisionMicro()
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="multiclass",
objective=objective,
max_iterations=3,
n_jobs=1,
)
automl.search()
best_pipeline = automl.best_pipeline
assert best_pipeline._is_fitted
y_pred = best_pipeline.predict(X)
assert len(np.unique(y_pred)) == 3
objective_in_additional_objectives = next(
(obj for obj in multiclass_objectives if obj.name == objective.name), None
)
multiclass_objectives.remove(objective_in_additional_objectives)
for expected, additional in zip(
multiclass_objectives, automl.additional_objectives
):
assert type(additional) is type(expected)
def test_multi_objective(X_y_multi):
X, y = X_y_multi
automl = AutoMLSearch(
X_train=X, y_train=y, problem_type="binary", objective="Log Loss Binary"
)
assert automl.problem_type == ProblemTypes.BINARY
automl = AutoMLSearch(
X_train=X, y_train=y, problem_type="multiclass", objective="Log Loss Multiclass"
)
assert automl.problem_type == ProblemTypes.MULTICLASS
automl = AutoMLSearch(
X_train=X, y_train=y, problem_type="multiclass", objective="AUC Micro"
)
assert automl.problem_type == ProblemTypes.MULTICLASS
automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary", objective="AUC")
assert automl.problem_type == ProblemTypes.BINARY
automl = AutoMLSearch(X_train=X, y_train=y, problem_type="multiclass")
assert automl.problem_type == ProblemTypes.MULTICLASS
automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary")
assert automl.problem_type == ProblemTypes.BINARY
def test_categorical_classification(X_y_categorical_classification):
X, y = X_y_categorical_classification
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="precision",
max_batches=1,
n_jobs=1,
)
automl.search()
assert not automl.rankings["mean_cv_score"].isnull().any()
def test_random_seed(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective=Precision(),
max_batches=1,
random_seed=0,
n_jobs=1,
)
automl.search()
automl_1 = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective=Precision(),
random_seed=0,
n_jobs=1,
)
automl_1.search()
assert automl.rankings.equals(automl_1.rankings)
def test_callback(X_y_binary):
X, y = X_y_binary
counts = {
"start_iteration_callback": 0,
"add_result_callback": 0,
}
def start_iteration_callback(pipeline, automl_obj, counts=counts):
counts["start_iteration_callback"] += 1
def add_result_callback(results, trained_pipeline, automl_obj, counts=counts):
counts["add_result_callback"] += 1
max_iterations = 3
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective=Precision(),
max_iterations=max_iterations,
start_iteration_callback=start_iteration_callback,
add_result_callback=add_result_callback,
n_jobs=1,
)
automl.search()
assert counts["start_iteration_callback"] == len(get_estimators("binary")) + 1
assert counts["add_result_callback"] == max_iterations
def test_additional_objectives(X_y_binary):
X, y = X_y_binary
objective = FraudCost(
retry_percentage=0.5,
interchange_fee=0.02,
fraud_payout_percentage=0.75,
amount_col=10,
)
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="F1",
max_iterations=2,
additional_objectives=[objective],
n_jobs=1,
)
automl.search()
results = automl.describe_pipeline(0, return_dict=True)
assert "Fraud Cost" in list(results["cv_data"][0]["all_objective_scores"].keys())
def test_optimizable_threshold_enabled(
AutoMLTestEnv,
X_y_binary,
caplog,
):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="precision",
max_iterations=1,
optimize_thresholds=True,
)
env = AutoMLTestEnv("binary")
with env.test_context(
score_return_value={"precision": 1.0},
optimize_threshold_return_value=0.8,
):
automl.search()
env.mock_fit.assert_called()
env.mock_score.assert_called()
env.mock_predict_proba.assert_called()
env.mock_optimize_threshold.assert_called()
assert automl.best_pipeline.threshold == 0.8
assert (
automl.results["pipeline_results"][0]["cv_data"][0].get(
"binary_classification_threshold"
)
== 0.8
)
assert (
automl.results["pipeline_results"][0]["cv_data"][1].get(
"binary_classification_threshold"
)
== 0.8
)
assert (
automl.results["pipeline_results"][0]["cv_data"][2].get(
"binary_classification_threshold"
)
== 0.8
)
automl.describe_pipeline(0)
out = caplog.text
assert "Objective to optimize binary classification pipeline thresholds for" in out
def test_optimizable_threshold_disabled(
AutoMLTestEnv,
X_y_binary,
):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="precision",
max_iterations=1,
optimize_thresholds=False,
)
env = AutoMLTestEnv("binary")
with env.test_context(score_return_value={automl.objective.name: 1}):
automl.search()
env.mock_fit.assert_called()
env.mock_score.assert_called()
assert not env.mock_predict_proba.called
assert not env.mock_optimize_threshold.called
assert automl.best_pipeline.threshold == 0.5
assert (
automl.results["pipeline_results"][0]["cv_data"][0].get(
"binary_classification_threshold"
)
== 0.5
)
assert (
automl.results["pipeline_results"][0]["cv_data"][1].get(
"binary_classification_threshold"
)
== 0.5
)
assert (
automl.results["pipeline_results"][0]["cv_data"][2].get(
"binary_classification_threshold"
)
== 0.5
)
def test_non_optimizable_threshold(AutoMLTestEnv, X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="AUC",
optimize_thresholds=False,
max_iterations=1,
)
env = AutoMLTestEnv("binary")
with env.test_context(score_return_value={"AUC": 1}):
automl.search()
env.mock_fit.assert_called()
env.mock_score.assert_called()
assert automl.best_pipeline.threshold is None
assert (
automl.results["pipeline_results"][0]["cv_data"][0].get(
"binary_classification_threshold"
)
is None
)
assert (
automl.results["pipeline_results"][0]["cv_data"][1].get(
"binary_classification_threshold"
)
is None
)
assert (
automl.results["pipeline_results"][0]["cv_data"][2].get(
"binary_classification_threshold"
)
is None
)
def test_describe_pipeline_objective_ordered(X_y_binary, caplog):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="AUC",
max_iterations=2,
n_jobs=1,
)
automl.search()
automl.describe_pipeline(0)
out = caplog.text
out_stripped = " ".join(out.split())
objectives = [get_objective(obj) for obj in automl.additional_objectives]
objectives_names = [obj.name for obj in objectives]
expected_objective_order = " ".join(objectives_names)
assert expected_objective_order in out_stripped
def test_max_time_units(X_y_binary):
X, y = X_y_binary
str_max_time = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="F1",
max_time="60 seconds",
)
assert str_max_time.max_time == 60
hour_max_time = AutoMLSearch(
X_train=X, y_train=y, problem_type="binary", objective="F1", max_time="1 hour"
)
assert hour_max_time.max_time == 3600
min_max_time = AutoMLSearch(
X_train=X, y_train=y, problem_type="binary", objective="F1", max_time="30 mins"
)
assert min_max_time.max_time == 1800
min_max_time = AutoMLSearch(
X_train=X, y_train=y, problem_type="binary", objective="F1", max_time="30 s"
)
assert min_max_time.max_time == 30
with pytest.raises(
AssertionError,
match="Invalid unit. Units must be hours, mins, or seconds. Received 'year'",
):
AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="F1",
max_time="30 years",
)
with pytest.raises(
TypeError,
match="Parameter max_time must be a float, int, string or None. Received <class 'tuple'> with value \\(30, 'minutes'\\).",
):
AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="F1",
max_time=(30, "minutes"),
)
def test_plot_disabled_missing_dependency(X_y_binary, has_minimal_dependencies):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary", max_iterations=3)
if has_minimal_dependencies:
with pytest.raises(AttributeError):
automl.plot.search_iteration_plot
else:
automl.plot.search_iteration_plot
def test_plot_iterations_max_iterations(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective="f1",
max_iterations=3,
n_jobs=1,
)
automl.search()
plot = automl.plot.search_iteration_plot()
plot_data = plot.data[0]
x = | pd.Series(plot_data["x"]) | pandas.Series |
import pandas as pd
import torch
from sklearn.metrics import mean_squared_error
import os
import json
import random
from sklearn.model_selection import train_test_split
from pathlib import Path
import networkx as nx
import dgl
import numpy as np
from sklearn import preprocessing
import pdb
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def normalize_features(X, train_mask, val_mask, test_mask):
    """Min-max scale every column, fitting the scaler on the training rows only."""
min_max_scaler = preprocessing.MinMaxScaler()
A = X.to_numpy(copy=True)
A[train_mask] = min_max_scaler.fit_transform(A[train_mask])
A[val_mask + test_mask] = min_max_scaler.transform(A[val_mask + test_mask])
return pd.DataFrame(A, columns=X.columns).astype(float)
def replace_na(X, train_mask):
if X.isna().any().any():
return X.fillna(X.iloc[train_mask].min() - 1)
return X
def encode_cat_features(X, y, cat_features, train_mask, val_mask, test_mask):
from category_encoders import CatBoostEncoder
enc = CatBoostEncoder()
A = X.to_numpy(copy=True)
b = y.to_numpy(copy=True)
A[np.ix_(train_mask, cat_features)] = enc.fit_transform(A[np.ix_(train_mask, cat_features)], b[train_mask])
A[np.ix_(val_mask + test_mask, cat_features)] = enc.transform(A[np.ix_(val_mask + test_mask, cat_features)])
A = A.astype(float)
return | pd.DataFrame(A, columns=X.columns) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
"""
This script joins the following datasets`claim_vehicle_employee_line.csv`,
`Preventable and Non Preventable_tabDelimited.txt` and `employee_experience_V2.csv`
to create a CSV file that contains the required information for the interactive plot.
It also cleans the resulting CSV file to get a successful result from the Google
Maps API. Assumes `get-data.py` is run before.
Usage: prepare_data.py --claims_file_path=<claims_file_path> --collisions_file_path=<collisions_file_path> --employee_file_path=<employee_file_path> --output_file_path=<output_file_path>
Options:
--claims_file_path=<claims_file_path> A file path for claims dataset that contains information about the claims.
--collisions_file_path=<collisions_file_path> A file path for collisions dataset that contains information about the collisions.
--employee_file_path=<employee_file_path> A file path for employees dataset that contains information about the employees.
--output_file_path=<output_file_path> A file path for resulting joined dataset.
Example:
python src/interactive_map/prepare_data.py \
--claims_file_path "data/TransLink Raw Data/claim_vehicle_employee_line.csv" \
--collisions_file_path "data/TransLink Raw Data/Preventable and Non Preventable_tabDelimited.txt"\
--employee_file_path "data/TransLink Raw Data/employee_experience_V2.csv"\
--output_file_path "results/processed_data/collision_with_claim_and_employee_info.csv"
"""
import pandas as pd
import numpy as np
from docopt import docopt
import glob
import os
import googlemaps
from pathlib import Path
import math
opt = docopt(__doc__)
def create_dirs_if_not_exists(file_path_list):
"""
It creates directories if they don't already exist.
Parameters:
file_path_list (list): A list of paths to be created if they don't exist.
"""
for path in file_path_list:
Path(os.path.dirname(path)).mkdir(parents=True, exist_ok=True)
def compare_loss_date(row):
"""
    A row-wise helper used in the main function to keep the non-null value
    of the two merged loss_date columns.
    """
    if pd.notna(row['loss_date_x']) and pd.isna(row['loss_date_y']):
val = row.loss_date_x
else:
val = row.loss_date_y
return val
def get_experience_in_months(row):
"""
    A row-wise helper used in the main function. It computes each operator's
    experience in months from the hire date and the date of the incident.
"""
difference = row['loss_date']- row['hire_date']
return round(difference.days / 30,0)
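def _demo_row_helpers():
    """A small self-contained sketch (not called anywhere) of how the two
    row-wise helpers above are applied after the merge in main(). The column
    names follow that merge; the dates are made up for illustration."""
    demo = pd.DataFrame({
        'loss_date_x': pd.to_datetime(['2019-01-15', None]),
        'loss_date_y': pd.to_datetime([None, '2019-03-01']),
        'hire_date': pd.to_datetime(['2015-06-01', '2018-12-01']),
    })
    demo['loss_date'] = demo.apply(compare_loss_date, axis=1)
    demo['experience_months'] = demo.apply(get_experience_in_months, axis=1)
    return demo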
def main(claims_file_path, collisions_file_path, employee_file_path, output_file_path):
#read the collisions dataset
collision = pd.read_csv(collisions_file_path, delimiter="\t")
collision.columns = map(str.lower, collision.columns)
#take the required columns
collision = collision[['loss_location_at','preventable_nonpreventable', 'loss_location_on',
'city_of_incident', 'loss_date','apta_desc', 'asset_vehicle_year', 'asset_manufacturer',
"claim_id", 'time_of_loss']]
#convert `loss_date` to date_time
collision['loss_date'] = pd.to_datetime(collision['loss_date'], format="%d/%m/%Y")
#read the claims dataset
claims = pd.read_csv(claims_file_path, low_memory=False)
#take the required columns
claims = claims[['claim_id', 'paid_cost$', 'empl_id','day_of_week', 'loss_date',
'claim_status', 'line_no', 'bus_no', 'bus_fuel_type','bus_carry_capacity' ]]
#give a better name for join
claims = claims.rename(columns = {'empl_id': 'employee_id'})
#convert `loss_date` to date_time
claims['loss_date'] = pd.to_datetime(claims['loss_date'], format="%Y-%m-%d")
#read the employees dataset
employee = pd.read_csv(employee_file_path)
#take only the required information
employee = employee[['employee_id', 'hire_date']]
#convert `hire_date` to date_time
employee['hire_date'] = pd.to_datetime(employee['hire_date'], format="%Y-%m-%d")
#first merge claims and employees' information with respect to employee_id column
claims_with_employee = pd.merge(employee, claims, on=['employee_id'], how='right')
# merge the above dataset and collisions information with respect to claim_id column
combined_df = | pd.merge(claims_with_employee, collision, on=['claim_id'], how='left') | pandas.merge |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
# we are doing simulating using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
),
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
# these slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame(
{"A": [1] * 5},
index=[
Timestamp("20130101 09:00:01"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:04"),
Timestamp("20130101 09:00:06"),
],
)
# closed must be 'right', 'left', 'both', 'neither'
msg = "closed must be 'right', 'left', 'both' or 'neither'"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="2s", closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling("2s", closed="right").sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling("2s", closed="both").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling("2s", closed="left").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling("2s", closed="neither").sum()
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# coding: utf8
import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import abc
from clinicadl.tools.inputs.filename_types import FILENAME_TYPE
import os
import nibabel as nib
import torch.nn.functional as F
from scipy import ndimage
import socket
from utils import get_dynamic_image
from .batchgenerators.transforms.color_transforms import ContrastAugmentationTransform, BrightnessTransform, \
GammaTransform, BrightnessGradientAdditiveTransform, LocalSmoothingTransform
from .batchgenerators.transforms.crop_and_pad_transforms import CenterCropTransform, RandomCropTransform, \
RandomShiftTransform
from .batchgenerators.transforms.noise_transforms import RicianNoiseTransform, GaussianNoiseTransform, \
GaussianBlurTransform
from .batchgenerators.transforms.spatial_transforms import Rot90Transform, MirrorTransform, SpatialTransform
from .batchgenerators.transforms.abstract_transforms import Compose
from .batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from .data_tool import hilbert_2dto3d_cut, hilbert_3dto2d_cut, hilbert_2dto3d, hilbert_3dto2d, linear_2dto3d_cut, \
linear_3dto2d_cut, linear_2dto3d, linear_3dto2d
#################################
# Datasets loaders
#################################
class MRIDataset(Dataset):
"""Abstract class for all derived MRIDatasets."""
def __init__(self, caps_directory, data_file,
preprocessing, transformations=None):
self.caps_directory = caps_directory
self.transformations = transformations
self.diagnosis_code = {
'CN': 0,
'AD': 1,
'sMCI': 0,
'pMCI': 1,
'MCI': 2,
'unlabeled': -1}
self.preprocessing = preprocessing
self.num_fake_mri = 0
if not hasattr(self, 'elem_index'):
raise ValueError(
"Child class of MRIDataset must set elem_index attribute.")
if not hasattr(self, 'mode'):
raise ValueError(
"Child class of MRIDataset must set mode attribute.")
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument data_file is not of correct type.')
mandatory_col = {"participant_id", "session_id", "diagnosis"}
if self.elem_index == "mixed":
mandatory_col.add("%s_id" % self.mode)
if not mandatory_col.issubset(set(self.df.columns.values)):
raise Exception("the data file is not in the correct format."
"Columns should include %s" % mandatory_col)
self.elem_per_image = self.num_elem_per_image()
def __len__(self):
return len(self.df) * self.elem_per_image
def _get_path(self, participant, session, mode="image", fake_caps_path=None):
if self.preprocessing == "t1-linear":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_linear',
participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1_linear', participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.nii.gz')
# temp_path = path.join(self.caps_directory, 'subjects', participant, session,
# 't1_linear')
# for file in os.listdir(temp_path):
# if file.find('_run-01_') != '-1':
# new_name = file.replace('_run-01_', '_')
# os.rename(os.path.join(temp_path, file), os.path.join(temp_path, new_name))
# print('rename {} to {}'.format(os.path.join(temp_path, file), os.path.join(temp_path, new_name)))
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1_linear', participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
image_path = fake_image_path
elif os.path.exists(image_path): # exist real pt file
                    pass
elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
                    print(
                        'Cannot find {}, {} or {} in either the real or the fake folder'.format(
                            image_path, fake_image_path, fake_nii_path))
else:
if os.path.exists(image_path): # exist real pt file
                    pass
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_linear')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
elif self.preprocessing == "t1-extensive":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_extensive',
participant + '_' + session
+ FILENAME_TYPE['skull_stripped'] + '.pt')
elif self.preprocessing == "t1-spm-graymatter":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.nii.gz')
temp_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space')
# for file in os.listdir(temp_path):
# if file.find('_run-01_') != '-1':
# new_name = file.replace('_run-01_', '_')
# os.rename(os.path.join(temp_path, file), os.path.join(temp_path, new_name))
# print('rename {} to {}'.format(os.path.join(temp_path, file), os.path.join(temp_path, new_name)))
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
image_path = fake_image_path
elif os.path.exists(image_path): # exist real pt file
                    pass
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
                    print(
                        'Cannot find {}, {} or {} in either the real or the fake folder'.format(
                            image_path, fake_image_path, fake_nii_path))
else:
if os.path.exists(image_path): # exist real pt file
                    pass
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
print('Can not find:{}'.format(origin_nii_path))
elif self.preprocessing == "t1-spm-whitematter":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.nii.gz')
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
image_path = fake_image_path
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(image_path): # exist real pt file
                    pass
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
else:
if os.path.exists(image_path): # exist real pt file
                    pass
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
elif self.preprocessing == "t1-spm-csf":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.nii.gz')
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
image_path = fake_image_path
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(image_path): # exist real pt file
                    pass
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
else:
if os.path.exists(image_path): # exist real pt file
                    pass
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
return image_path
def _get_meta_data(self, idx):
image_idx = idx // self.elem_per_image
participant = self.df.loc[image_idx, 'participant_id']
session = self.df.loc[image_idx, 'session_id']
if self.elem_index is None:
elem_idx = idx % self.elem_per_image
elif self.elem_index == "mixed":
elem_idx = self.df.loc[image_idx, '%s_id' % self.mode]
else:
elem_idx = self.elem_index
diagnosis = self.df.loc[image_idx, 'diagnosis']
label = self.diagnosis_code[diagnosis]
return participant, session, elem_idx, label
def _get_full_image(self):
from ..data.utils import find_image_path as get_nii_path
import nibabel as nib
if self.preprocessing in ["t1-linear", "t1-extensive"]:
participant_id = self.df.loc[0, 'participant_id']
session_id = self.df.loc[0, 'session_id']
try:
image_path = self._get_path(participant_id, session_id, "image")
image = torch.load(image_path)
except FileNotFoundError:
try:
image_path = get_nii_path(
self.caps_directory,
participant_id,
session_id,
preprocessing=self.preprocessing)
image_nii = nib.load(image_path)
image_np = image_nii.get_fdata()
image = ToTensor()(image_np)
except:
                    # if the CAPS folder only contains extracted slices/patches, the whole image cannot be found; fall back to a zero tensor of the expected size
# image_path = os.path.join(self.caps_directory,'sub-ADNI002S0295_ses-M00_T1w_space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_T1w.nii.gz')
# image_nii = nib.load(image_path)
# image_np = image_nii.get_fdata()
# image = ToTensor()(image_np)
                    image = torch.zeros([169, 208, 179])  # cropped t1-linear images have shape [169, 208, 179]
elif self.preprocessing in ["t1-spm-whitematter", "t1-spm-whitematter", "t1-spm-csf"]:
image = torch.zeros([121, 145, 121]) # in those segm data, size : [121, 145, 121]
return image
@abc.abstractmethod
def __getitem__(self, idx):
pass
@abc.abstractmethod
def num_elem_per_image(self):
pass
class MRIDatasetImage(MRIDataset):
"""Dataset of MRI organized in a CAPS folder."""
def __init__(self, caps_directory, data_file,
preprocessing='t1-linear', transformations=None, crop_padding_to_128=False, resample_size=None,
fake_caps_path=None, roi=False, roi_size=32, model=None, data_preprocess='MinMax',
data_Augmentation=False, method_2d=None):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
"""
self.elem_index = None
self.mode = "image"
self.model = model
self.data_preprocess = data_preprocess
self.data_Augmentation = data_Augmentation
self.crop_padding_to_128 = crop_padding_to_128
self.resample_size = resample_size
self.fake_caps_path = fake_caps_path
self.roi = roi
self.roi_size = roi_size
self.method_2d = method_2d
# if self.roi:
# if socket.gethostname() == 'zkyd':
# aal_mask_dict_dir = '/root/Downloads/atlas/aal_mask_dict_128.npy'
# elif socket.gethostname() == 'tian-W320-G10':
# aal_mask_dict_dir = '/home/tian/pycharm_project/MRI_GNN/atlas/aal_mask_dict_128.npy'
# self.aal_mask_dict = np.load(aal_mask_dict_dir, allow_pickle=True).item() # 116; (181,217,181)
super().__init__(caps_directory, data_file, preprocessing, transformations)
        print('crop_padding_to_128: {}'.format(self.crop_padding_to_128))
def __getitem__(self, idx):
participant, session, _, label = self._get_meta_data(idx)
image_path = self._get_path(participant, session, "image", fake_caps_path=self.fake_caps_path)
if self.preprocessing == 't1-linear':
ori_name = 't1_linear'
else:
ori_name = 't1_spm'
resampled_image_path = image_path.replace(ori_name, '{}_{}_resample_{}'.format(ori_name, self.data_preprocess,
self.resample_size))
CNN2020_DEEPCNN_image_path = image_path.replace(ori_name,
'{}_{}_model_{}'.format(ori_name, self.data_preprocess,
self.model))
roi_image_path = resampled_image_path.replace('image_based',
'AAL_roi_based_{}'.format(self.roi_size))
# delate_image_path = image_path.replace('image_based',
# 'AAL_roi_based_{}'.format(self.roi_size))
# if os.path.exists(delate_image_path):
# os.remove(delate_image_path)
# print('delating:{}'.format(delate_image_path))
        if not self.data_Augmentation:  # No data augmentation: 1. check whether preprocessed data is already saved on disk; 2. if not, process it and save it to disk
if self.roi and 'ROI' in self.model:
# Get resampled_image
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(resampled_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
# Get roi image
if os.path.exists(roi_image_path):
try:
ROI_image = torch.load(roi_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(roi_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
data = image.squeeze() # [128, 128, 128]
data = self.roi_extract(data, roi_size=self.roi_size, sub_id=participant,
preprocessing=self.preprocessing,
session=session, save_nii=False)
ROI_image = data.unsqueeze(dim=0) # [1, num_roi, 128, 128, 128]
# sample = {'image': image, 'roi_image': ROI_image, 'label': label, 'participant_id': participant,
# 'session_id': session,
# 'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
dir, file = os.path.split(roi_image_path)
if not os.path.exists(dir):
os.makedirs(dir)
torch.save(ROI_image, roi_image_path)
print('Save roi image: {}'.format(roi_image_path))
sample = {'image': ROI_image, 'label': label, 'participant_id': participant,
'session_id': session, 'all_image': resampled_image,
'image_path': roi_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["CNN2020", "DeepCNN"]:
if os.path.exists(CNN2020_DEEPCNN_image_path):
CNN2020_DEEPCNN_image_image = torch.load(CNN2020_DEEPCNN_image_path)
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
data = image.squeeze() # [128, 128, 128]
try:
CNN2020_DEEPCNN_image_image = data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
CNN2020_DEEPCNN_image_image = np.expand_dims(data, 0)
dir, file = os.path.split(CNN2020_DEEPCNN_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
                    torch.save(CNN2020_DEEPCNN_image_image, CNN2020_DEEPCNN_image_path)
                    print('Save {} model image: {}'.format(self.model, CNN2020_DEEPCNN_image_path))
sample = {'image': CNN2020_DEEPCNN_image_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': CNN2020_DEEPCNN_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.method_2d is not None:
path, file = os.path.split(resampled_image_path)
file_2d = file.split('.')[0] + '_' + self.method_2d + '.' + file.split('.')[1]
path_2d = os.path.join(path, file_2d)
if os.path.exists(path_2d):
try:
data_2d = torch.load(path_2d)
except:
print('Wrong file:{}'.format(path_2d))
else:
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(resampled_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
if self.method_2d == 'hilbert_cut':
data_2d = hilbert_3dto2d_cut(resampled_image)
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'linear_cut':
data_2d = linear_3dto2d_cut(resampled_image)
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'hilbert_downsampling':
                        data_low = self.__resize_data__(resampled_image.squeeze(), 64, 64, 64)
data_low = torch.from_numpy(data_low)
data_2d = hilbert_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'linear_downsampling':
                        data_low = self.__resize_data__(resampled_image.squeeze(), 64, 64, 64)
data_low = torch.from_numpy(data_low)
data_2d = linear_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
sample = {'image': data_2d.squeeze(), 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': path_2d, 'num_fake_mri': self.num_fake_mri}
elif self.model not in [
"Conv5_FC3",
'DeepCNN',
'CNN2020',
'CNN2020_gcn',
'DeepCNN_gcn',
"Dynamic2D_net_Alex",
"Dynamic2D_net_Res34",
"Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16",
"Dynamic2D_net_Vgg11",
"Dynamic2D_net_Mobile",
'ROI_GCN']:
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
except:
raise FileExistsError('file error:{}'.format(resampled_image_path))
# if self.data_Augmentation and self.transformations:
# dict = {}
# dict['data'] = resampled_image
# begin_trans_indx = 0
# for i in range(len(self.transformations.transforms)):
# if self.transformations.transforms[i].__class__.__name__ in ['ItensityNormalizeNonzeorVolume',
# 'ItensityNormalizeNonzeorVolume',
# 'MinMaxNormalization']:
# begin_trans_indx = i + 1
# resampled_image = self.transformations(begin_trans_indx=begin_trans_indx, **dict)
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
                    torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
sample = {'image': resampled_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': resampled_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["Dynamic2D_net_Alex", "Dynamic2D_net_Res34", "Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16", "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
image_np = np.array(resampled_data)
image_np = np.expand_dims(image_np, 0) # 0,w,h,d
image_np = np.swapaxes(image_np, 0, 3) # w,h,d,0
im = get_dynamic_image(image_np)
im = np.expand_dims(im, 0)
im = np.concatenate([im, im, im], 0)
im = torch.from_numpy(im)
im = im.float()
sample = {'image': im, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
return sample
        else:  # data_Augmentation is enabled: just load the original data and process it on the fly
# 1. load original data
image = torch.load(image_path)
if self.transformations: # Augmentation
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
augmentation_data = image.squeeze() # [128, 128, 128]
# print(self.transformations)
# print(self.transformations[0])
# if self.crop_padding_to_128 and image.shape[1] != 128:
# image = image[:, :, 8:-9, :] # [1, 121, 128, 121]
# image = image.unsqueeze(0) # [1, 1, 121, 128, 121]
# pad = torch.nn.ReplicationPad3d((4, 3, 0, 0, 4, 3))
# image = pad(image) # [1, 1, 128, 128, 128]
# image = image.squeeze(0) # [1, 128, 128, 128]
# if self.resample_size is not None:
# assert self.resample_size > 0, 'resample_size should be a int positive number'
# image = image.unsqueeze(0)
# image = F.interpolate(image,
# size=self.resample_size) # resize to resample_size * resample_size * resample_size
# print('resample before trans shape:{}'.format(image.shape))
# print('resample before trans mean:{}'.format(image.mean()))
# print('resample before trans std:{}'.format(image.std()))
# print('resample before trans max:{}'.format(image.max()))
# print('resample before trans min:{}'.format(image.min()))
# # image = self.transformations(image)
# # print('resample after trans shape:{}'.format(image.shape))
# # print('resample after trans mean:{}'.format(image.mean()))
# # print('resample after trans std:{}'.format(image.std()))
# # print('resample after trans max:{}'.format(image.max()))
# # print('resample after trans min:{}'.format(image.min()))
# image = image.squeeze(0)
#
# if self.model in ['DeepCNN', 'DeepCNN_gcn']:
# image = image.unsqueeze(0)
# image = F.interpolate(image, size=[49, 39, 38])
# image = image.squeeze(0)
# elif self.model in ['CNN2020', 'CNN2020_gcn']:
# image = image.unsqueeze(0)
# image = F.interpolate(image, size=[139, 177, 144])
# image = image.squeeze(0)
# # preprocessing data
# data = image.squeeze() # [128, 128, 128]
# # print(data.shape)
# input_W, input_H, input_D = data.shape
# if self.model not in ["ConvNet3D", "ConvNet3D_gcn", "VoxCNN", "Conv5_FC3", 'DeepCNN', 'CNN2020', 'CNN2020_gcn',
# "VoxCNN_gcn", 'DeepCNN_gcn', "ConvNet3D_v2", "ConvNet3D_ori", "Dynamic2D_net_Alex",
# "Dynamic2D_net_Res34", "Dynamic2D_net_Res18", "Dynamic2D_net_Vgg16",
# "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
# # drop out the invalid range
# # if self.preprocessing in ['t1-spm-graymatter', 't1-spm-whitematter', 't1-spm-csf']:
# data = self.__drop_invalid_range__(data)
# print('drop_invalid_range shape:{}'.format(data.shape))
# print('drop_invalid_range mean:{}'.format(data.mean()))
# print('drop_invalid_range std:{}'.format(data.std()))
# print('drop_invalid_range max:{}'.format(data.max()))
# print('drop_invalid_range min:{}'.format(data.min()))
# # resize data
# data = self.__resize_data__(data, input_W, input_H, input_D)
# print('resize_data shape:{}'.format(data.shape))
# print('resize_data mean:{}'.format(data.mean()))
# print('resize_data std:{}'.format(data.std()))
# print('resize_data max:{}'.format(data.max()))
# print('resize_data min:{}'.format(data.min()))
# # normalization datas
# data = np.array(data)
# data = self.__itensity_normalize_one_volume__(data)
# print('itensity_normalize shape:{}'.format(data.shape))
# print('itensity_normalize mean:{}'.format(data.mean()))
# print('itensity_normalize std:{}'.format(data.std()))
# print('itensity_normalize max:{}'.format(data.max()))
# print('itensity_normalize min:{}'.format(data.min()))
# # if self.transformations and self.model in ["ConvNet3D", "VoxCNN"]:
# # data = self.transformations(data)
# data = torch.from_numpy(data)
# if self.model in ['CNN2020', 'CNN2020_gcn']:
# data = np.array(data)
# data = self.__itensity_normalize_one_volume__(data, normalize_all=True)
# data = torch.from_numpy(data)
if self.model in ["Dynamic2D_net_Alex", "Dynamic2D_net_Res34", "Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16", "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
image_np = np.array(augmentation_data)
image_np = np.expand_dims(image_np, 0) # 0,w,h,d
image_np = np.swapaxes(image_np, 0, 3) # w,h,d,0
im = get_dynamic_image(image_np)
im = np.expand_dims(im, 0)
im = np.concatenate([im, im, im], 0)
im = torch.from_numpy(im)
im = im.float()
sample = {'image': im, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
if self.roi and 'ROI' in self.model:
try:
resampled_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(augmentation_data, 0)
augmentation_data = self.roi_extract(augmentation_data, roi_size=self.roi_size, sub_id=participant,
preprocessing=self.preprocessing,
session=session, save_nii=False)
ROI_image = augmentation_data.unsqueeze(dim=0) # [1, num_roi, 128, 128, 128]
sample = {'image': ROI_image, 'all_image': resampled_image, 'label': label,
'participant_id': participant,
'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
elif self.method_2d is not None:
path, file = os.path.split(resampled_image_path)
file_2d = file.split('.')[0] + '_' + self.method_2d + '.' + file.split('.')[1]
path_2d = os.path.join(path, file_2d)
try:
resampled_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(augmentation_data, 0)
if self.method_2d == 'hilbert_cut':
data_2d = hilbert_3dto2d_cut(resampled_image)
elif self.method_2d == 'linear_cut':
data_2d = linear_3dto2d_cut(resampled_image)
elif self.method_2d == 'hilbert_downsampling':
                    data_low = self.__resize_data__(resampled_image.squeeze(), 64, 64, 64)
data_low = torch.from_numpy(data_low)
data_2d = hilbert_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
elif self.method_2d == 'linear_downsampling':
                    data_low = self.__resize_data__(resampled_image.squeeze(), 64, 64, 64)
data_low = torch.from_numpy(data_low)
data_2d = linear_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
sample = {'image': data_2d.squeeze(), 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': path_2d, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["CNN2020", "DeepCNN"]:
try:
CNN2020_DEEPCNN_image_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
CNN2020_DEEPCNN_image_image = np.expand_dims(augmentation_data, 0)
dir, file = os.path.split(CNN2020_DEEPCNN_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(image, CNN2020_DEEPCNN_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, CNN2020_DEEPCNN_image_path))
sample = {'image': CNN2020_DEEPCNN_image_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': CNN2020_DEEPCNN_image_path, 'num_fake_mri': self.num_fake_mri}
else:
try:
image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
image = np.expand_dims(augmentation_data, 0)
sample = {'image': image, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
def __drop_invalid_range__(self, volume):
"""
Cut off the invalid area
"""
zero_value = volume[0, 0, 0]
# print('zero:{}'.format(zero_value))
non_zeros_idx = np.where(volume != zero_value)
# print('zero idx:{}'.format(non_zeros_idx))
try:
[max_z, max_h, max_w] = np.max(np.array(non_zeros_idx), axis=1)
[min_z, min_h, min_w] = np.min(np.array(non_zeros_idx), axis=1)
except:
print(zero_value)
print(non_zeros_idx)
return volume[min_z:max_z + 1, min_h:max_h + 1, min_w:max_w + 1]
def __resize_data__(self, data, input_W, input_H, input_D):
"""
Resize the data to the input size
"""
[depth, height, width] = data.shape
scale = [input_W * 1.0 / depth, input_H * 1.0 / height, input_D * 1.0 / width]
data = ndimage.interpolation.zoom(data, scale, order=0)
return data
def __itensity_normalize_one_volume__(self, volume, normalize_all=False):
"""
        normalize the intensity of an nd volume based on the mean and std of the nonzero region
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
if normalize_all:
pixels = volume
else:
pixels = volume[volume > 0]
mean = pixels.mean()
std = pixels.std()
out = (volume - mean) / std
if not normalize_all:
out_random = np.random.normal(0, 1, size=volume.shape)
out[volume == 0] = out_random[volume == 0]
return out
def num_elem_per_image(self):
return 1
def roi_extract(self, MRI, roi_size=32, sub_id=None, preprocessing=None, session=None, save_nii=False):
roi_data_list = []
roi_label_list = []
if 'slave' in socket.gethostname():
aal_mask_dict_dir = '/root/Downloads/atlas/aal_mask_dict_right.npy'
elif socket.gethostname() == 'tian-W320-G10':
aal_mask_dict_dir = '/home/tian/pycharm_project/MRI_GNN/atlas/aal_mask_dict_right.npy'
elif socket.gethostname() == 'zkyd':
aal_mask_dict_dir = '/data/fanchenchen/atlas/aal_mask_dict_right.npy'
self.aal_mask_dict = np.load(aal_mask_dict_dir, allow_pickle=True).item() # 116; (181,217,181)
for i, key in enumerate(self.aal_mask_dict.keys()):
# useful_data = self.__drop_invalid_range__(self.aal_mask_dict[key])
# useful_data = resize_data(useful_data, target_size=[128, 128, 128])
# useful_data = useful_data[np.newaxis, np.newaxis, :, :, :] # 1,1,128,128,128
# roi_batch_data = MRI.cpu().numpy() * useful_data # batch, 1, 128,128,128
mask = self.aal_mask_dict[key]
# print('mask min:{}'.format(mask.min()))
# print('mask max:{}'.format(mask.max()))
# print('mask:{}'.format(mask))
ww, hh, dd = MRI.shape
MRI = self.__resize_data__(MRI, 181, 217, 181)
# MRI = (MRI - MRI.min()) / (MRI.max() - MRI.min())
roi_data = MRI * mask.squeeze() # batch, 1, 128,128,128
# print('roi_data min:{}'.format(roi_data.min()))
# print('roi_data max:{}'.format(roi_data.max()))
roi_label_list.append(key)
# save nii to Visualization
# print(image_np.max())
# print(image_np.min())
# print(roi_data.shape)
if save_nii:
image_nii = nib.Nifti1Image(roi_data, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_ori_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
try:
roi_data = self.__drop_invalid_range__(roi_data) # xx,xx,xx
except:
print(sub_id)
print(session)
                pass  # keep the uncropped ROI if __drop_invalid_range__ fails
# roi_data = self.__drop_invalid_range__(mask.squeeze()) # xx,xx,xx
if save_nii:
image_nii = nib.Nifti1Image(roi_data, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_drop_invalid_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
# print(roi_data.shape)
roi_data = self.__resize_data__(roi_data, roi_size, roi_size, roi_size) # roi_size, roi_size, roi_size
# print(roi_data.shape)
roi_data = torch.from_numpy(roi_data)
roi_data_list.append(roi_data) # roi_size, roi_size, roi_size
# save nii to Visualization
if save_nii:
image_np = roi_data.numpy()
image_nii = nib.Nifti1Image(image_np, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_resize_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
            if i >= 89:  # keep only the first 90 AAL regions
break
roi_batch = torch.stack(roi_data_list).type(torch.float32) # num_roi, roi_size, roi_size, roi_size
return roi_batch
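# The sketch below is purely illustrative (the toy volume is an assumption, not
# pipeline data); it mirrors the non-zero-region z-scoring performed by
# MRIDatasetImage.__itensity_normalize_one_volume__ above.
def _demo_nonzero_normalization():
    """Z-score a toy volume with the mean/std of its non-zero region only and
    replace the zero background with Gaussian noise, as the method above does."""
    volume = np.zeros((4, 4, 4), dtype=np.float64)
    volume[1:3, 1:3, 1:3] = np.arange(8, dtype=np.float64).reshape(2, 2, 2) + 1.0
    pixels = volume[volume > 0]
    out = (volume - pixels.mean()) / pixels.std()
    out[volume == 0] = np.random.normal(0, 1, size=volume.shape)[volume == 0]
    return out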
class MRIDatasetPatch(MRIDataset):
def __init__(self, caps_directory, data_file, patch_size, stride_size, transformations=None, prepare_dl=False,
patch_index=None, preprocessing="t1-linear"):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
patch_index (int, optional): If a value is given the same patch location will be extracted for each image.
else the dataset will load all the patches possible for one image.
patch_size (int): size of the regular cubic patch.
stride_size (int): length between the centers of two patches.
"""
self.patch_size = patch_size
self.stride_size = stride_size
self.elem_index = patch_index
self.mode = "patch"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, patch_idx, label = self._get_meta_data(idx)
if self.prepare_dl:
patch_path = path.join(self._get_path(participant, session, "patch")[0:-7]
+ '_patchsize-' + str(self.patch_size)
+ '_stride-' + str(self.stride_size)
+ '_patch-' + str(patch_idx) + '_T1w.pt')
image = torch.load(patch_path)
else:
image_path = self._get_path(participant, session, "image")
full_image = torch.load(image_path)
image = self.extract_patch_from_mri(full_image, patch_idx)
if self.transformations:
image = self.transformations(image)
sample = {'image': image, 'label': label,
'participant_id': participant, 'session_id': session, 'patch_id': patch_idx}
return sample
def num_elem_per_image(self):
if self.elem_index is not None:
return 1
image = self._get_full_image()
patches_tensor = image.unfold(1, self.patch_size, self.stride_size
).unfold(2, self.patch_size, self.stride_size
).unfold(3, self.patch_size, self.stride_size).contiguous()
patches_tensor = patches_tensor.view(-1,
self.patch_size,
self.patch_size,
self.patch_size)
num_patches = patches_tensor.shape[0]
return num_patches
def extract_patch_from_mri(self, image_tensor, index_patch):
patches_tensor = image_tensor.unfold(1, self.patch_size, self.stride_size
).unfold(2, self.patch_size, self.stride_size
).unfold(3, self.patch_size, self.stride_size).contiguous()
patches_tensor = patches_tensor.view(-1,
self.patch_size,
self.patch_size,
self.patch_size)
extracted_patch = patches_tensor[index_patch, ...].unsqueeze_(
0).clone()
return extracted_patch
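# Purely illustrative sketch of the unfold-based patch extraction used by
# MRIDatasetPatch (the dummy tensor and sizes are assumptions, not values taken
# from the pipeline): a [1, 8, 8, 8] volume with patch_size=4 and stride_size=4
# yields 2 * 2 * 2 = 8 non-overlapping patches of shape [4, 4, 4].
def _demo_patch_unfold():
    volume = torch.arange(8 * 8 * 8, dtype=torch.float32).view(1, 8, 8, 8)
    patches = (volume
               .unfold(1, 4, 4)
               .unfold(2, 4, 4)
               .unfold(3, 4, 4)
               .contiguous()
               .view(-1, 4, 4, 4))
    assert patches.shape == (8, 4, 4, 4)
    return patches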
class MRIDatasetRoi(MRIDataset):
def __init__(self, caps_directory, data_file, preprocessing="t1-linear",
transformations=None, prepare_dl=False):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
"""
self.elem_index = None
self.mode = "roi"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, roi_idx, label = self._get_meta_data(idx)
if self.prepare_dl:
raise NotImplementedError(
'The extraction of ROIs prior to training is not implemented.')
else:
image_path = self._get_path(participant, session, "image")
image = torch.load(image_path)
patch = self.extract_roi_from_mri(image, roi_idx)
if self.transformations:
patch = self.transformations(patch)
sample = {'image': patch, 'label': label,
'participant_id': participant, 'session_id': session,
'roi_id': roi_idx}
return sample
def num_elem_per_image(self):
return 2
def extract_roi_from_mri(self, image_tensor, left_is_odd):
"""
:param image_tensor: (Tensor) the tensor of the image.
:param left_is_odd: (int) if 1 the left hippocampus is extracted, else the right one.
:return: Tensor of the extracted hippocampus
"""
if self.preprocessing == "t1-linear":
if left_is_odd == 1:
# the center of the left hippocampus
crop_center = (61, 96, 68)
else:
# the center of the right hippocampus
crop_center = (109, 96, 68)
else:
raise NotImplementedError("The extraction of hippocampi was not implemented for "
"preprocessing %s" % self.preprocessing)
crop_size = (50, 50, 50) # the output cropped hippocampus size
extracted_roi = image_tensor[
:,
crop_center[0] - crop_size[0] // 2: crop_center[0] + crop_size[0] // 2:,
crop_center[1] - crop_size[1] // 2: crop_center[1] + crop_size[1] // 2:,
crop_center[2] - crop_size[2] // 2: crop_center[2] + crop_size[2] // 2:
].clone()
return extracted_roi
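# Purely illustrative sketch of the fixed-size crop in
# MRIDatasetRoi.extract_roi_from_mri: a 50**3 box centred on the left
# hippocampus coordinates of a zero, t1-linear-sized dummy image (the tensor
# content is an assumption; only the crop arithmetic is taken from the class).
def _demo_hippocampus_crop():
    image_tensor = torch.zeros(1, 169, 208, 179)
    crop_center, crop_size = (61, 96, 68), (50, 50, 50)
    roi = image_tensor[
        :,
        crop_center[0] - crop_size[0] // 2: crop_center[0] + crop_size[0] // 2,
        crop_center[1] - crop_size[1] // 2: crop_center[1] + crop_size[1] // 2,
        crop_center[2] - crop_size[2] // 2: crop_center[2] + crop_size[2] // 2,
    ]
    assert roi.shape == (1, 50, 50, 50)
    return roi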
class MRIDatasetSlice(MRIDataset):
def __init__(self, caps_directory, data_file, preprocessing="t1-linear",
transformations=None, mri_plane=0, prepare_dl=False,
discarded_slices=20, mixed=False):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
mri_plane (int): Defines which mri plane is used for slice extraction.
discarded_slices (int or list): number of slices discarded at the beginning and the end of the image.
If one single value is given, the same amount is discarded at the beginning and at the end.
mixed (bool): If True will look for a 'slice_id' column in the input DataFrame to load each slice
independently.
"""
# Rename MRI plane
self.mri_plane = mri_plane
self.direction_list = ['sag', 'cor', 'axi']
if self.mri_plane >= len(self.direction_list):
raise ValueError(
"mri_plane value %i > %i" %
(self.mri_plane, len(
self.direction_list)))
# Manage discarded_slices
if isinstance(discarded_slices, int):
discarded_slices = [discarded_slices, discarded_slices]
if isinstance(discarded_slices, list) and len(discarded_slices) == 1:
discarded_slices = discarded_slices * 2
self.discarded_slices = discarded_slices
if mixed:
self.elem_index = "mixed"
else:
self.elem_index = None
self.mode = "slice"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, slice_idx, label = self._get_meta_data(idx)
slice_idx = slice_idx + self.discarded_slices[0]
if self.prepare_dl:
# read the slices directly
slice_path = (self._get_path(participant, session, "slice")[0:-7]
+ '_axis-%s' % self.direction_list[self.mri_plane]
+ '_channel-rgb_slice-%i_T1w.pt' % slice_idx)
image = torch.load(slice_path)
else:
image_path = self._get_path(participant, session, "image")
full_image = torch.load(image_path)
image = self.extract_slice_from_mri(full_image, slice_idx)
if self.transformations:
image = self.transformations(image)
sample = {'image': image, 'label': label,
'participant_id': participant, 'session_id': session,
'slice_id': slice_idx}
return sample
def num_elem_per_image(self):
if self.elem_index == "mixed":
return 1
image = self._get_full_image()
return image.size(self.mri_plane + 1) - \
self.discarded_slices[0] - self.discarded_slices[1]
def extract_slice_from_mri(self, image, index_slice):
"""
Grab one slice along the chosen view and build a 3-channel (RGB-like) image for transfer learning by duplicating the slice into the R, G and B channels.
:param image: (tensor) the full 3D volume
:param index_slice: (int) index of the wanted slice
:return: (tensor) the selected slice stacked into 3 identical channels
To note, for each view:
Axial_view = "[:, :, slice_i]"
Coronal_view = "[:, slice_i, :]"
Sagittal_view = "[slice_i, :, :]"
"""
image = image.squeeze(0)
simple_slice = image[(slice(None),) * self.mri_plane + (index_slice,)]
triple_slice = torch.stack((simple_slice, simple_slice, simple_slice))
return triple_slice
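# Illustrative sketch (hypothetical helper, not used elsewhere): the tuple indexing
# `image[(slice(None),) * mri_plane + (index_slice,)]` used above is equivalent to
# the three explicit views listed in the docstring.
def _demo_slice_indexing():
    import torch
    image = torch.rand(4, 5, 6)
    idx = 2
    assert torch.equal(image[(slice(None),) * 0 + (idx,)], image[idx, :, :])  # sagittal (plane 0)
    assert torch.equal(image[(slice(None),) * 1 + (idx,)], image[:, idx, :])  # coronal (plane 1)
    assert torch.equal(image[(slice(None),) * 2 + (idx,)], image[:, :, idx])  # axial (plane 2)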
def return_dataset(mode, input_dir, data_df, preprocessing,
transformations, params, cnn_index=None):
"""
Return appropriate Dataset according to given options.
Args:
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
input_dir: (str) path to a directory containing a CAPS structure.
data_df: (DataFrame) List subjects, sessions and diagnoses.
preprocessing: (str) type of preprocessing wanted ('t1-linear' or 't1-extensive')
transformations: (transforms) list of transformations performed on-the-fly.
params: (Namespace) options used by specific modes.
cnn_index: (int) Index of the CNN in a multi-CNN paradigm (optional).
Returns:
(Dataset) the corresponding dataset.
"""
if cnn_index is not None and mode in ["image", "roi", "slice"]:
raise ValueError("Multi-CNN is not implemented for %s mode." % mode)
if params.model == "ROI_GCN":
use_roi = True
else:
use_roi = False
if mode == "image":
return MRIDatasetImage(
input_dir,
data_df,
preprocessing,
transformations=transformations,
crop_padding_to_128=params.crop_padding_to_128,
resample_size=params.resample_size,
fake_caps_path=params.fake_caps_path,
# only_use_fake=params.only_use_fake,
roi=use_roi,
roi_size=params.roi_size,
model=params.model,
data_preprocess=params.data_preprocess,
data_Augmentation=params.data_Augmentation,
method_2d=params.method_2d
)
if mode == "patch":
return MRIDatasetPatch(
input_dir,
data_df,
params.patch_size,
params.stride_size,
preprocessing=preprocessing,
transformations=transformations,
prepare_dl=params.prepare_dl,
patch_index=cnn_index
)
elif mode == "roi":
return MRIDatasetRoi(
input_dir,
data_df,
preprocessing=preprocessing,
transformations=transformations
)
elif mode == "slice":
return MRIDatasetSlice(
input_dir,
data_df,
preprocessing=preprocessing,
transformations=transformations,
mri_plane=params.mri_plane,
prepare_dl=params.prepare_dl,
discarded_slices=params.discarded_slices)
else:
raise ValueError("Mode %s is not implemented." % mode)
def compute_num_cnn(input_dir, tsv_path, options, data="train"):
transformations = get_transforms(options)
if data == "train":
example_df, _ = load_data(tsv_path, options.diagnoses, 0, options.n_splits, options.baseline)
elif data == "classify":
example_df = pd.read_csv(tsv_path, sep='\t')
else:
example_df = load_data_test(tsv_path, options.diagnoses)
full_dataset = return_dataset(options.mode, input_dir, example_df,
options.preprocessing, transformations, options)
return full_dataset.elem_per_image
##################################
# Transformations
##################################
class GaussianSmoothing(object):
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, sample):
from scipy.ndimage.filters import gaussian_filter
image = sample['image']
np.nan_to_num(image, copy=False)
smoothed_image = gaussian_filter(image, sigma=self.sigma)
sample['image'] = smoothed_image
return sample
def __repr__(self):
return self.__class__.__name__ + '(sigma={})'.format(self.sigma)
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __call__(self, image):
np.nan_to_num(image, copy=False)
image = image.astype(float)
return torch.from_numpy(image[np.newaxis, :]).float()
def __repr__(self):
return self.__class__.__name__
class MinMaxNormalization(object):
"""Normalizes a tensor between 0 and 1"""
def __call__(self, **data_dict):
image = data_dict['data']
image = (image - image.min()) / (image.max() - image.min())
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
class ItensityNormalizeNonzeorVolume(object):
"""
Normalize the intensity of an nd volume based on the mean and std of the nonzero region; background voxels are replaced with standard Gaussian noise.
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
def __call__(self, **data_dict):
image = data_dict['data']
image = image.squeeze()
image = np.array(image)
pixels = image[image > 0]
mean = pixels.mean()
std = pixels.std()
out = (image - mean) / std
out_random = np.random.normal(0, 1, size=image.shape)
out[image == 0] = out_random[image == 0]
out = torch.from_numpy(out.copy())
data_dict['data'] = out.unsqueeze(0)
return data_dict
def __repr__(self):
return self.__class__.__name__
class ItensityNormalizeAllVolume(object):
"""
Normalize the intensity of an nd volume based on the mean and std of the whole volume.
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
def __call__(self, **data_dict):
image = data_dict['data']
image = (image - image.mean()) / image.std()
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
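# Illustrative sketch (hypothetical helper, not used elsewhere): difference between
# the two z-score variants above on a toy volume - the "nonzero" variant takes the
# statistics from brain voxels only (and, in the class above, fills the background
# with Gaussian noise), while the "all volume" variant uses every voxel.
def _demo_intensity_normalization():
    import numpy as np
    vol = np.zeros((4, 4, 4), dtype=np.float32)
    vol[1:3, 1:3, 1:3] = np.random.rand(2, 2, 2) + 1.0  # fake "brain" voxels
    nonzero = vol[vol > 0]
    z_nonzero = (vol - nonzero.mean()) / nonzero.std()  # stats from nonzero voxels only
    z_all = (vol - vol.mean()) / vol.std()              # stats from the whole volume
    return z_nonzero, z_all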
class CropPpadding128(object):
"""
Crop and pad the image to 128 x 128 x 128.
"""
def __call__(self, **data_dict):
image = data_dict['data']
if image.shape[1] == 121 and image.shape[2] == 145 and image.shape[
3] == 121:
image = image[:, :, 8:-9, :] # [1, 121, 128, 121]
image = image.unsqueeze(0) # [1, 1, 121, 128, 121]
pad = torch.nn.ReplicationPad3d((4, 3, 0, 0, 4, 3))
image = pad(image) # [1, 1, 128, 128, 128]
image = image.squeeze(0) # [1, 128, 128, 128]
elif image.shape[1] == 128 and image.shape[2] == 128 and image.shape[
3] == 128:
pass
else:
assert image.shape[1] == 121 and image.shape[2] == 145 and image.shape[
3] == 121, "image shape must be 1*121*145*121 or 1*128*128*128, but given shape:{}".format(image.shape)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
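# Illustrative sketch (hypothetical helper, not used elsewhere): the hard-coded
# numbers above implement 145 - 8 - 9 = 128 (crop on the second spatial axis) and
# 121 + 4 + 3 = 128 (replication padding on the first and third axes), turning a
# (1, 121, 145, 121) volume into (1, 128, 128, 128).
def _demo_crop_padding_128():
    import torch
    image = torch.rand(1, 121, 145, 121)
    image = image[:, :, 8:-9, :].unsqueeze(0)                      # (1, 1, 121, 128, 121)
    image = torch.nn.ReplicationPad3d((4, 3, 0, 0, 4, 3))(image)   # (1, 1, 128, 128, 128)
    return image.squeeze(0)                                        # (1, 128, 128, 128)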
class Resize(torch.nn.Module):
"""
Resize data to target size
"""
def __init__(self, resample_size):
super().__init__()
# assert resample_size > 0, 'resample_size should be a int positive number'
self.resample_size = resample_size
def forward(self, **data_dict):
image = data_dict['data']
image = image.unsqueeze(0)
image = F.interpolate(image, size=self.resample_size)
image = image.squeeze(0)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__ + '(resample_size={0})'.format(self.resample_size)
class CheckDictSize(object):
"""
Ensure the image stored under 'data' has 5 dimensions, e.g. (1, 1, 128, 128, 128).
"""
def __call__(self, **dict):
image = dict['data']
if len(image.shape) == 4:
image = np.array(image.unsqueeze(0))
elif len(image.shape) == 3:
image = np.array(image.unsqueeze(0).unsqueeze(0))
assert len(image.shape) == 5
dict['data'] = image
return dict
def __repr__(self):
return self.__class__.__name__
class DictToImage(object):
"""
Extract the image tensor from the dict, squeezed/unsqueezed back to 4 dimensions.
"""
def __call__(self, **dict):
image = dict['data']
if len(image.shape) == 5:
image = image.squeeze(0)
elif len(image.shape) == 3:
image = image.unsqueeze(0)
return image
def __repr__(self):
return self.__class__.__name__
class DropInvalidRange(torch.nn.Module):
"""
Cut off the invalid (background) region surrounding the brain.
"""
def __init__(self, keep_size=True):
super().__init__()
self.keep_size = keep_size
def __call__(self, **data_dict):
image = data_dict['data']
image = image.squeeze(0)
zero_value = image[0, 0, 0]
z, h, w = image.shape
non_zeros_idx = np.where(image != zero_value)
[max_z, max_h, max_w] = np.max(np.array(non_zeros_idx), axis=1)
[min_z, min_h, min_w] = np.min(np.array(non_zeros_idx), axis=1)
image = image[min_z:max_z, min_h:max_h, min_w:max_w].unsqueeze(0)
if self.keep_size:
image = image.unsqueeze(0)
image = F.interpolate(image, size=[z, h, w])
image = image.squeeze(0)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__ + '(keep_size={})'.format(self.keep_size)
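# Illustrative sketch (hypothetical helper, not used elsewhere): the bounding box
# above comes from the coordinates of every voxel that differs from the corner
# value; a numpy-only version of the same computation:
def _demo_nonzero_bounding_box():
    import numpy as np
    vol = np.zeros((10, 10, 10))
    vol[2:7, 3:8, 4:9] = 1.0
    idx = np.where(vol != vol[0, 0, 0])
    mins = np.min(np.array(idx), axis=1)  # array([2, 3, 4])
    maxs = np.max(np.array(idx), axis=1)  # array([6, 7, 8])
    return vol[mins[0]:maxs[0], mins[1]:maxs[1], mins[2]:maxs[2]]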
def get_transforms(params, is_training=True):
if params.mode == 'image':
trans_list = []
trans_list.append(MinMaxNormalization())
if params.preprocessing != 't1-linear':
trans_list.append(CropPpadding128())
trans_list.append(DropInvalidRange(keep_size=True))
if params.resample_size is not None:
trans_list.append(Resize(params.resample_size))
if params.data_preprocess == 'MinMax':
trans_list.append(MinMaxNormalization())
elif params.data_preprocess == 'NonzeorZscore':
trans_list.append(ItensityNormalizeNonzeorVolume())
elif params.data_preprocess == 'AllzeorZscore':
trans_list.append(ItensityNormalizeAllVolume())
if is_training:
if params.ContrastAugmentationTransform > 0:
trans_list.append(CheckDictSize())  # the augmentation transforms below expect 5-dim input: (1, 1, 128, 128, 128)
trans_list.append(ContrastAugmentationTransform((0.3, 3.), preserve_range=True,
p_per_sample=params.ContrastAugmentationTransform))
if params.BrightnessTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
BrightnessTransform(mu=0, sigma=1, per_channel=False, p_per_sample=params.BrightnessTransform))
if params.GammaTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
GammaTransform(gamma_range=(0.5, 2), invert_image=False, per_channel=False, retain_stats=False,
p_per_sample=params.GammaTransform))
if params.BrightnessGradientAdditiveTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(BrightnessGradientAdditiveTransform(scale=(5, 5),
p_per_sample=params.BrightnessGradientAdditiveTransform))
if params.LocalSmoothingTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(LocalSmoothingTransform(scale=(5, 5),
p_per_sample=params.LocalSmoothingTransform))
if params.RandomShiftTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
RandomShiftTransform(shift_mu=0, shift_sigma=3, p_per_sample=params.RandomShiftTransform))
if params.RicianNoiseTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
RicianNoiseTransform(noise_variance=(0, 0.1), p_per_sample=params.RicianNoiseTransform))
if params.GaussianNoiseTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
GaussianNoiseTransform(noise_variance=(0, 0.1), p_per_sample=params.GaussianNoiseTransform))
if params.GaussianBlurTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
GaussianBlurTransform(blur_sigma=(1, 5), different_sigma_per_channel=False,
p_per_sample=params.GaussianBlurTransform))
if params.Rot90Transform > 0:
trans_list.append(CheckDictSize())
trans_list.append(Rot90Transform(num_rot=(1, 2, 3), axes=(0, 1, 2), p_per_sample=params.Rot90Transform))
if params.MirrorTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(MirrorTransform(axes=(0, 1, 2), p_per_sample=params.MirrorTransform))
if params.SpatialTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
SpatialTransform(patch_size=(params.resample_size, params.resample_size, params.resample_size),
p_el_per_sample=params.SpatialTransform,
p_rot_per_axis=params.SpatialTransform,
p_scale_per_sample=params.SpatialTransform,
p_rot_per_sample=params.SpatialTransform))
trans_list.append(DictToImage())
transformations = Compose(trans_list)
if params.model in ['DeepCNN', 'DeepCNN_gcn']:
trans_list = []
trans_list.append(MinMaxNormalization())
trans_list.append(Resize(resample_size=[49, 39, 38]))
trans_list.append(DictToImage())
transformations = Compose(trans_list)
if params.model in ['CNN2020', 'CNN2020_gcn']:
trans_list = []
trans_list.append(MinMaxNormalization())
trans_list.append(Resize(resample_size=[139, 177, 144]))
trans_list.append(ItensityNormalizeAllVolume())
trans_list.append(DictToImage())
transformations = Compose(trans_list)
elif params.mode in ["patch", "roi"]:
if params.minmaxnormalization:
transformations = Compose([MinMaxNormalization(), DictToImage()])
else:
transformations = None
elif params.mode == "slice":
trg_size = (224, 224)
if params.minmaxnormalization:
transformations = transforms.Compose([MinMaxNormalization(),
transforms.ToPILImage(),
transforms.Resize(trg_size),
transforms.ToTensor()])
else:
transformations = transforms.Compose([transforms.ToPILImage(),
transforms.Resize(trg_size),
transforms.ToTensor()])
else:
raise ValueError("Transforms for mode %s are not implemented." % params.mode)
print('transformer: {}'.format(repr(transformations)))
return transformations
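# Illustrative sketch (hypothetical helper, not used elsewhere): outside of the
# Compose pipeline the dict-based transforms defined above can also be chained by
# hand, e.g. on a random MNI-sized volume:
def _demo_manual_transform_chain():
    import torch
    sample = {'data': torch.rand(1, 121, 145, 121)}
    sample = MinMaxNormalization()(**sample)   # scale intensities to [0, 1]
    sample = CropPpadding128()(**sample)       # crop/pad to (1, 128, 128, 128)
    return DictToImage()(**sample)             # back to a plain 4D tensor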
################################
# tsv files loaders
################################
def load_data(train_val_path, diagnoses_list,
split, n_splits=None, baseline=True, fake_caps_path=None, only_use_fake=False):
train_df = pd.DataFrame()
valid_df = pd.DataFrame()
if n_splits is None:
train_path = path.join(train_val_path, 'train')
valid_path = path.join(train_val_path, 'validation')
else:
train_path = path.join(train_val_path, 'train_splits-' + str(n_splits),
'split-' + str(split))
valid_path = path.join(train_val_path, 'validation_splits-' + str(n_splits),
'split-' + str(split))
print("Train", train_path)
print("Valid", valid_path)
for diagnosis in diagnoses_list:
if isinstance(baseline, str):
if baseline in ['true', 'True']:
train_diagnosis_path = path.join(
train_path, diagnosis + '_baseline.tsv')
elif baseline in ['false', 'False']:
train_diagnosis_path = path.join(train_path, diagnosis + '.tsv')
else:
if baseline:
train_diagnosis_path = path.join(
train_path, diagnosis + '_baseline.tsv')
else:
train_diagnosis_path = path.join(train_path, diagnosis + '.tsv')
valid_diagnosis_path = path.join(
valid_path, diagnosis + '_baseline.tsv')
train_diagnosis_df = pd.read_csv(train_diagnosis_path, sep='\t')
valid_diagnosis_df = pd.read_csv(valid_diagnosis_path, sep='\t')
train_df = pd.concat([train_df, train_diagnosis_df])
valid_df = pd.concat([valid_df, valid_diagnosis_df])
train_df.reset_index(inplace=True, drop=True)
valid_df.reset_index(inplace=True, drop=True)
if fake_caps_path is not None and not baseline:
path_list = os.listdir(fake_caps_path)
for t in range(len(path_list)):
if path_list[t] != 'subjects':
file_name = path_list[t]
fake_tsv_path = os.path.join(fake_caps_path, file_name)
fake_df = pd.read_csv(fake_tsv_path, sep='\t')
train_fake_df = pd.DataFrame(columns=["participant_id", "session_id", "diagnosis"])
for i in range(len(fake_df)):
subject = fake_df.loc[i]['participant_id']
ses_id = fake_df.loc[i]["session_id"]
filted_df_train = train_df.loc[train_df['participant_id'] == subject].drop_duplicates().reset_index(
drop=True)
if filted_df_train.shape[0] != 0:
filted_fake_df = fake_df.loc[fake_df['participant_id'] == subject].drop_duplicates().reset_index(
drop=True)
diagnosis = filted_df_train.loc[0]["diagnosis"]
filted_fake_df['diagnosis'] = diagnosis
train_fake_df = train_fake_df.append(filted_fake_df).drop_duplicates().reset_index(drop=True)
if only_use_fake:
print('*** [Only] use {} fake images for train!'.format(len(train_fake_df)))
train_df = train_fake_df
else:
print('use {} fake images for train!'.format(len(train_fake_df)))
train_df = train_df.append(train_fake_df).drop_duplicates().reset_index(drop=True)
saved_tsv_path = os.path.join(train_path, fake_caps_path.split('/')[-1])
save_path_train = os.path.join(saved_tsv_path, 'train_real_and_fake_' + "_".join(diagnoses_list) + '.tsv')
if not os.path.exists(saved_tsv_path):
os.makedirs(saved_tsv_path)
train_df.to_csv(save_path_train, sep='\t', index=False)
print('save: {}'.format(save_path_train))
print('train fake df:{}'.format(train_fake_df))
print('train real and fake df:{}'.format(train_df))
print('valid df:{}'.format(valid_df))
else:
print('only train real df:{}'.format(train_df))
print('valid df:{}'.format(valid_df))
return train_df, valid_df
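# Usage note: with n_splits given, load_data above expects a layout of the form
#   <train_val_path>/train_splits-<n_splits>/split-<split>/<diagnosis>[_baseline].tsv
#   <train_val_path>/validation_splits-<n_splits>/split-<split>/<diagnosis>_baseline.tsv
# e.g. train_df, valid_df = load_data(tsv_dir, ["AD", "CN"], split=0, n_splits=5)
# (the diagnosis labels here are only an example).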
def load_data_test(test_path, diagnoses_list):
test_df = | pd.DataFrame() | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
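# For instance, get_upcast_box(pd.Index, Series([1, 2])) returns Series, since a
# Series operand takes priority over an Index box.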
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
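# Hedged standalone sketch of the behaviour asserted above, using only public
# pandas/numpy/pytest APIs; the helper name is illustrative and not part of
# the original test module.
def _example_anchored_offset_raises():
    import numpy as np
    import pandas as pd
    import pytest
    tdi = pd.TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
    anchored = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
    # A PerformanceWarning is emitted first, then the addition raises.
    with pytest.raises(TypeError):
        tdi + anchored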
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match='Cannot divide'):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype='m8[h]')
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array,
scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = ('floor_divide cannot use operands|'
'Cannot divide int by Timedelta*')
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 Day', '2 Days', '0 Days'] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range('1 ns', '10 ns', periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 ns', '0 ns'] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = ['0 Days', '1 Day', '0 Days'] + ['3 Days'] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = | tm.box_expected(expected, box_with_array) | pandas.util.testing.box_expected |
from logging import log
import numpy as np
import pandas as pd
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import ScalarFormatter
from flask import Flask, render_template, request
from tkinter import *
from tkinter import ttk
import sys
import os
import shutil
import random
from matplotlib.ticker import MaxNLocator
from pathlib import Path
import math
import copy
#from decimal import Decimal, ROUND_HALF_UP
def readinput(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
symbol = csv_input['Symbol']
value = csv_input['Value']
unit = csv_input['Unit']
valueDict = {}
unitDict = {}
for i, j, k in zip(symbol, value, unit):
valueDict[i] = float(j)
unitDict[i] = str(k)
return valueDict, unitDict
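# Hedged usage sketch for readinput(): assumes the CSV holds the Symbol/Value/
# Unit columns parsed above; the file name and the LHV row are illustrative.
def _example_readinput():
    demo = pd.DataFrame({'Symbol': ['LHV'], 'Value': [50.0], 'Unit': ['MJ/kg']})
    demo.to_csv('demo_input.csv', index=False)
    values, units = readinput('demo_input.csv')
    print(values['LHV'], units['LHV'])  # -> 50.0 MJ/kg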
def CeqLHVFunc(filename,fuelName):
csv_input = | pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 30 15:16:34 2020
@author: thodoris
"""
import pandas
import numpy
import os
import sys
import re
import multiprocessing
sys.path.append('../')
from core_functions import uniprot_query_with_limit
from constant_variables import define_main_dir
def worker(_id, ref_proteomes_list):
query_columns = ['organism', 'lineage(PHYLUM)', 'lineage(CLASS)']
df_columns = ['proteome_id', 'species', 'phylum', 'class']
if not os.path.isfile(main_dir+'taxonomy_details_'+_id+'.txt'):
F_details = open(main_dir+'taxonomy_details_'+_id+'.txt', 'a')
details = df_columns
F_details.write('\t'.join(details)+'\n')
F_details.close()
proteomes = []
else:
df = pandas.read_csv(main_dir+'taxonomy_details_'+_id+'.txt', sep='\t')
proteomes = df.proteome_id.tolist()
for i, ref_proteome in enumerate(ref_proteomes_list):
if ref_proteome in proteomes:
continue
query = 'proteome:'+ref_proteome
success, data, column_names = uniprot_query_with_limit(query, query_columns, 5)
if success == False:
continue
else:
for entry in data[1:]:
f = entry.split('\t')
details = [ref_proteome, f[0], f[1], f[2]]
F_details = open(main_dir+'taxonomy_details_'+_id+'.txt', 'a')
F_details.write('\t'.join(details)+'\n')
F_details.close()
break
queue.put({i:'ok'})
'''
DESCRIPTION:
This process is executed only for bacteria, in order to obtain their taxonomic
classification and select at least one species from every phylum. The script
reads the 01_ranked_species_based_on_genomic_annotation.tsv file.
The whole process is parallelized with the multiprocessing package (default
number of processes is 32), so 32 separate output files are created in the
'scanning_taxonomies' directory. These are concatenated in the following steps
to determine the species that will be used in the analysis.
Output: the directory 'scanning_taxonomies' and bacteria_taxonomies.tsv
'''
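# Illustrative sketch of the chunking used by the driver below: the proteome
# list is split into `nprocs` contiguous batches and the last worker takes the
# remainder (the original hard-codes `i == 31` for nprocs=32).
def _example_batching(items, nprocs=32):
    batch = len(items) // nprocs
    return [items[i * batch:] if i == nprocs - 1 else items[i * batch:(i + 1) * batch]
            for i in range(nprocs)]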
if __name__ == "__main__":
prokaryotic_class='bacteria'
main_dir = define_main_dir(prokaryotic_class)+'preprocessing/'
if not os.path.exists(main_dir+'scanning_taxonomies/'):
os.makedirs(main_dir+'scanning_taxonomies/')
f = main_dir+'01_ranked_species_based_on_genomic_annotation.tsv'
ref_proteomes_df = pandas.read_csv(f, sep='\t')
ref_proteomes_list = ref_proteomes_df.proteome_id.tolist()
queue = multiprocessing.Queue()
procs = []
nprocs = 32
batch = int(numpy.math.floor(len(ref_proteomes_list)/float(nprocs)))
for i in range(nprocs):
if i == 31:
tmp = ref_proteomes_list[i*batch:]
else:
tmp = ref_proteomes_list[i*batch:(i+1)*batch]
p = multiprocessing.Process(target=worker, args=(str(i).zfill(2), tmp,))
p.start()
procs.append(p)
for p in procs:
p.join()
files = [main_dir+'scanning_taxonomies/'+i for i in os.listdir(main_dir+'scanning_taxonomies/')]
files = filter(lambda x: re.search('taxonomy_details', x), files)
details_list = []
for f in files:
df = | pandas.read_csv(f, sep='\t') | pandas.read_csv |
#!/usr/bin/python3
import os
import argparse
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
import torch
import torch.nn as nn
from core.model import MNISTNet
from core.dataset import dataset_fn
from utils.config import load_config, create_exp_from_config
from utils.helpers import set_rcParams
from core import odin
def run_grid_search(dl_in, dl_out, model, temperatures, epsilons, num_img, results_gridsearch_csv):
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
columns = ['temperature', 'epsilon', 'method', 'rocauc', 'fpr95']
df = pd.DataFrame(columns=columns)
for temper in temperatures:
for epsi in epsilons:
df_in = odin.predict_scores(model, device, dl_in, epsi, temper, num_img)
df_out = odin.predict_scores(model, device, dl_out, epsi, temper, num_img)
for method in ['base', 'odin']:
roc_auc, fpr95 = odin.evaluate_scores(df_in[df_in['method'] == method]['score'],
df_out[df_out['method'] == method]['score'])
row = {'temperature': temper, 'epsilon': epsi, 'method': method,
'rocauc': roc_auc, 'fpr95': fpr95}
df = df.append(row, ignore_index=True)
print(f'-----------------------------------------------------')
print(f'Hyperparams t={temper}, eps={epsi}')
print(f'AUC: {roc_auc}')
print(f'FPR95: {fpr95}')
# validation results:
df.to_csv(results_gridsearch_csv)
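# Note: DataFrame.append (used above) is deprecated since pandas 1.4 and
# removed in 2.0. A hedged alternative is to collect plain row dicts and build
# the frame once at the end; the helper name below is illustrative.
def _example_collect_rows(rows):
    columns = ['temperature', 'epsilon', 'method', 'rocauc', 'fpr95']
    return pd.DataFrame(rows, columns=columns)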
def plot_gridsearch_results(df, temperatures, epsilons, log_dir):
set_rcParams()
X, Y = np.meshgrid(temperatures, epsilons)
subset = df.loc[df['method'] == 'odin']
for measure in ['rocauc', 'fpr95']:
fig, ax = plt.subplots(figsize=(3, 3))
grid_z0 = griddata(subset[['temperature', 'epsilon']], subset[measure], (X, Y), method='nearest')
cmap = 'crest'
if measure == 'rocauc':
vmin, vmax = 0.5, 1.0
vmin, vmax = None, None
cmap = f'{cmap}_r'
elif measure == 'fpr95':
vmin, vmax = 0.0, 1.0
vmin, vmax = None, None
ax = sns.heatmap(grid_z0, annot=True, linewidths=.5, cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xlabel(r'temperature $\tau$')
ax.set_xticklabels(temperatures)
ax.set_ylabel(r'perturbation $\epsilon$')
ax.set_yticklabels(epsilons)
file_name = os.path.join(log_dir, f'ood_{measure}.pdf')
fig.savefig(file_name)
def eval_best_param(dl_in, dl_out, model, gridsearch_df, results_csv):
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
subset = gridsearch_df.loc[gridsearch_df['method'] == 'odin']
best_row = subset[subset.fpr95 == subset.fpr95.min()]
temper = best_row['temperature'].values[0]
epsi = best_row['epsilon'].values[0]
num_img = len(dl_in.dataset)
df_in = odin.predict_scores(model, device, dl_in, epsi, temper, num_img)
df_out = odin.predict_scores(model, device, dl_out, epsi, temper, num_img)
columns = ['temperature', 'epsilon', 'method', 'rocauc', 'fpr95']
df = pd.DataFrame(columns=columns)
for method in ['base', 'odin']:
roc_auc, fpr95 = odin.evaluate_scores(df_in[df_in['method'] == method]['score'],
df_out[df_out['method'] == method]['score'])
row = {'temperature': temper, 'epsilon': epsi, 'method': method,
'rocauc': roc_auc, 'fpr95': fpr95}
df = df.append(row, ignore_index=True)
df.to_csv(results_csv)
def main(exp_dir, config_file, seed, run_gridsearch=True, run_plot=True, run_eval=True):
exp_name = create_exp_from_config(config_file, args.exp_dir)
print(f'run ODIN for configuration: {exp_name}')
# paths
log_dir = os.path.join(exp_dir, exp_name)
results_gridsearch_csv = os.path.join(log_dir, 'ood_gridsearch.csv')
results_test_csv = os.path.join(log_dir, 'ood_test.csv')
# hyperparam range:
temperatures = [1, 10, 100, 1000]
epsilons = [0, 0.001, 0.002, 0.003, 0.004]
###############################################################################################################################
# Data preparation
###############################################################################################################################
params = load_config(config_file)
dataloader = dataset_fn(seed=seed, params_dict=params['dataset'])
if params['model']['task_classifier_type'] == 'mnist':
model = MNISTNet(n_outputs=params['model']['n_outputs'],
checkpoint_path=params['model']['task_classifier_path'],
download=True)
else:
raise NotImplementedError
###############################################################################################################################
# Hyperparameter search and evaluation on test fold
###############################################################################################################################
model.eval()
if not run_gridsearch and not os.path.exists(results_gridsearch_csv):
raise ValueError('must run grid search.')
if run_gridsearch:
num_img = 1000
dl_in = dataloader['validation']['p']
dl_out = dataloader['validation']['q']
run_grid_search(dl_in, dl_out, model, temperatures, epsilons,
num_img, results_gridsearch_csv)
if run_plot:
df = | pd.read_csv(results_gridsearch_csv) | pandas.read_csv |
# Notebook - Tab 1
import tkinter as tk
from tkinter import filedialog
import pandas as pd
from mmm.functions import *
from mmm.moneyManager import *
class DataImportTab(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self._frame = None
self.impDF = pd.DataFrame()
self.switch_frame(DataImportTab_Frame1)
def switch_frame(self, frame_class):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
def openFile(self):
self.master.master.attributes("-topmost", False)
self.fname = filedialog.askopenfilename(initialdir = os.path.abspath(os.getcwd() + "/../"),title = "Select file",filetypes = [("csv files","*.csv")])
self.master.master.attributes("-topmost", True)
self.update()
try:
self.impDF = import_transFileUFCU(self.fname)
isAccountId = True
except:
try:
self.impDF = import_histFileUFCU(self.fname,0)
isAccountId = False
except:
self.impDF = pd.DataFrame()
isAccountId = None
return isAccountId
# ==================first frame for DataImportTab=========================================================
class DataImportTab_Frame1(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.openButton = tk.Button(self, text='Open', command=lambda: master.switch_frame(DataImportTab_Frame2))
self.openButton.grid (row = 0, column = 2,padx=5,sticky=tk.W)
if len(master.impDF)>0:
newDF = pd.concat([master.impDF , master.master.master.DF], ignore_index=True)
newDF = newDF.drop_duplicates(subset=['account', 'trans_id',"amount"], keep="last").reset_index(drop=True)
master.master.master.DF = newDF.sort_values("trans_id")
self.fname = ""
master.impDF = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from collections import OrderedDict, Counter
import itertools
def select_columns_by_metebolic_parm(df, param_name, exclude=False):
if exclude == True:
mask = ~df.columns.str.contains(pat=param_name)
return df.loc[:, mask]
mask = df.columns.str.contains(pat=param_name)
return df.loc[:, mask]
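# Hedged usage sketch for the selector above; column names are illustrative.
def _example_select_columns():
    df = pd.DataFrame({'glucose_mean': [1], 'glucose_max': [2], 'hr_mean': [3]})
    kept = select_columns_by_metebolic_parm(df, 'glucose')            # glucose_* only
    dropped = select_columns_by_metebolic_parm(df, 'glucose', True)   # hr_mean only
    return kept.columns.tolist(), dropped.columns.tolist()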
def _get_columns_names_list(df):
return df.columns.values.tolist()
def _make_dict_to_replace_names(columns_names_list, pattern_addition_to_parms):
leng = len(columns_names_list)
return {
columns_names_list[i]:
pattern_addition_to_parms + columns_names_list[i]
for i in range(leng)
}
def _get_actuals_values(df):
df_actuals_features_calculeted = df.diff()
first_row_df_cumuletive = df.iloc[0:1]
return df_actuals_features_calculeted.fillna(first_row_df_cumuletive)
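# Hedged worked example for _get_actuals_values(): diff() leaves NaN in the
# first row, which is then filled back with the original first row, turning
# cumulative readings into per-interval actuals. Values are illustrative.
def _example_get_actuals_values():
    cumulative = pd.DataFrame({'energy': [10.0, 15.0, 21.0]})
    return _get_actuals_values(cumulative)  # energy -> [10.0, 5.0, 6.0]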
def pandas_dataframe_from_path(path, datetime_column_name):
return | pd.read_csv(path, date_parser=datetime_column_name) | pandas.read_csv |
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from collections import abc
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._typing import Axis, DtypeObj, Label, Scalar
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
dict_compat,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core import algorithms, common as com
from pandas.core.arrays import Categorical
from pandas.core.construction import extract_array, sanitize_array
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
Index,
ensure_index,
get_objs_combined_axis,
union_indexes,
)
from pandas.core.internals.managers import (
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
if TYPE_CHECKING:
from numpy.ma.mrecords import MaskedRecords
from pandas import Series
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
arrays,
arr_names,
index,
columns,
dtype: Optional[DtypeObj] = None,
verify_integrity: bool = True,
):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
arr_names = ensure_index(arr_names)
if verify_integrity:
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
columns = ensure_index(columns)
else:
columns = ensure_index(columns)
index = ensure_index(index)
# from BlockManager perspective
axes = [columns, index]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def masked_rec_array_to_mgr(
data: "MaskedRecords", index, columns, dtype: Optional[DtypeObj], copy: bool
):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for col in arr_columns:
arr = data[col]
fv = arr.fill_value
mask = ma.getmaskarray(arr)
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if copy:
mgr = mgr.copy()
return mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def init_ndarray(values, index, columns, dtype: Optional[DtypeObj], copy: bool):
# input must be a ndarray, list, Series, index
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# we could have a categorical type passed or coerced to 'category'
# recast this to an arrays_to_mgr
if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype(
dtype
):
if not hasattr(values, "dtype"):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1, index, columns)
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
elif is_extension_array_dtype(values) or is_extension_array_dtype(dtype):
# GH#19157
if isinstance(values, np.ndarray) and values.ndim > 1:
# GH#12513 a EA dtype passed with a 2D array, split into
# multiple EAs that view the values
values = [values[:, n] for n in range(values.shape[1])]
else:
values = [values]
if columns is None:
columns = Index(range(len(values)))
return arrays_to_mgr(values, columns, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None and not is_dtype_equal(values.dtype, dtype):
try:
values = construct_1d_ndarray_preserving_na(
values.ravel(), dtype=dtype, copy=False
).reshape(values.shape)
except Exception as orig:
# e.g. ValueError when trying to cast object dtype to float64
raise ValueError(
f"failed to cast to '{dtype}' (Exception was: {orig})"
) from orig
# _prep_ndarray ensures that values.ndim == 2 at this point
index, columns = _get_axes(
values.shape[0], values.shape[1], index=index, columns=columns
)
values = values.T
# if we don't have a dtype specified, then try to convert objects
    # on the entire block; this is to convert if we have datetimelikes
# embedded in an object type
if dtype is None and is_object_dtype(values.dtype):
if values.ndim == 2 and values.shape[0] != 1:
# transpose and separate blocks
dvals_list = [maybe_infer_to_datetimelike(row) for row in values]
for n in range(len(dvals_list)):
if isinstance(dvals_list[n], np.ndarray):
dvals_list[n] = dvals_list[n].reshape(1, -1)
from pandas.core.internals.blocks import make_block
# TODO: What about re-joining object columns?
block_values = [
make_block(dvals_list[n], placement=[n], ndim=2)
for n in range(len(dvals_list))
]
else:
datelike_vals = maybe_infer_to_datetimelike(values)
block_values = [datelike_vals]
else:
block_values = [values]
return create_block_manager_from_blocks(block_values, [columns, index])
def init_dict(data: Dict, index, columns, dtype: Optional[DtypeObj] = None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
arrays: Union[Sequence[Any], "Series"]
if columns is not None:
from pandas.core.series import Series
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isna()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
if dtype is None or (
not is_extension_array_dtype(dtype)
and np.issubdtype(dtype, np.flexible)
):
# GH#1783
nan_dtype = np.dtype(object)
else:
nan_dtype = dtype
val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
arrays.loc[missing] = [val] * missing.sum()
else:
keys = list(data.keys())
columns = data_names = Index(keys)
arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
arrays = [arr if not isinstance(arr, ABCIndex) else arr._data for arr in arrays]
arrays = [
arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
]
return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def nested_data_to_arrays(
data: Sequence,
columns: Optional[Index],
index: Optional[Index],
dtype: Optional[DtypeObj],
):
"""
Convert a single sequence of arrays to multiple arrays.
"""
# By the time we get here we have already checked treat_as_nested(data)
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
if index is None:
if isinstance(data[0], ABCSeries):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
return arrays, columns, index
def treat_as_nested(data) -> bool:
"""
Check if we should use nested_data_to_arrays.
"""
return len(data) > 0 and is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1
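# For example, [[1, 2], [3, 4]] or [Series([1]), Series([2])] are treated as
# nested (each element is a 1-dimensional list-like), while [1, 2, 3] is not.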
# ---------------------------------------------------------------------
def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
if not isinstance(values, (np.ndarray, ABCSeries, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
elif isinstance(values, range):
arr = np.arange(values.start, values.stop, values.step, dtype="int64")
return arr[..., np.newaxis]
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], "len"):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except (ValueError, TypeError):
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError(f"Must pass 2-d input. shape={values.shape}")
return values
def _homogenize(data, index, dtype: Optional[DtypeObj]):
oindex = None
homogenized = []
for val in data:
if isinstance(val, ABCSeries):
if dtype is not None:
val = val.astype(dtype)
if val.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
val = val.reindex(index, copy=False)
else:
if isinstance(val, dict):
if oindex is None:
oindex = index.astype("O")
if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):
val = dict_compat(val)
else:
val = dict(val)
val = lib.fast_multiget(val, oindex._values, default=np.nan)
val = sanitize_array(
val, index, dtype=dtype, copy=False, raise_cast_failure=False
)
homogenized.append(val)
return homogenized
def extract_index(data) -> Index:
"""
Try to infer an Index from the passed data, raise ValueError on failure.
"""
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes: List[Union[List[Label], Index]] = []
have_raw_arrays = False
have_series = False
have_dicts = False
for val in data:
if isinstance(val, ABCSeries):
have_series = True
indexes.append(val.index)
elif isinstance(val, dict):
have_dicts = True
indexes.append(list(val.keys()))
elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(val))
if not indexes and not raw_lengths:
raise ValueError("If using all scalar values, you must pass an index")
if have_series:
index = union_indexes(indexes)
elif have_dicts:
index = union_indexes(indexes, sort=False)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError("All arrays must be of the same length")
if have_dicts:
raise ValueError(
"Mixing dicts with non-Series may lead to ambiguous ordering."
)
if have_series:
assert index is not None # for mypy
if lengths[0] != len(index):
msg = (
f"array length {lengths[0]} does not match index "
f"length {len(index)}"
)
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
return ensure_index(index)
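# For example, extract_index([Series([1, 2], index=["a", "b"]), [3, 4]]) unions
# the Series index to Index(["a", "b"]); a raw list of length 3 there would
# raise ValueError because array length must match the inferred index length.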
def reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (
columns is not None
and len(columns)
and arr_columns is not None
and len(arr_columns)
):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _get_names_from_index(data):
has_some_name = any(getattr(s, "name", None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index: List[Label] = list(range(len(data)))
count = 0
for i, s in enumerate(data):
n = getattr(s, "name", None)
if n is not None:
index[i] = n
else:
index[i] = f"Unnamed {count}"
count += 1
return index
def _get_axes(N, K, index, columns) -> Tuple[Index, Index]:
# helper to create the axes as indexes
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
def dataclasses_to_dicts(data):
"""
Converts a list of dataclass instances to a list of dictionaries.
Parameters
----------
data : List[Type[dataclass]]
Returns
--------
list_dict : List[dict]
Examples
--------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int
    >>> dataclasses_to_dicts([Point(1,2), Point(2,3)])
[{"x":1,"y":2},{"x":2,"y":3}]
"""
from dataclasses import asdict
return list(map(asdict, data))
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(data, columns, dtype: Optional[DtypeObj] = None):
"""
Return list of arrays, columns.
"""
if isinstance(data, ABCDataFrame):
if columns is not None:
arrays = [
data._ixs(i, axis=1).values
for i, col in enumerate(data.columns)
if col in columns
]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
elif isinstance(data[0], Categorical):
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif isinstance(data, np.ndarray) and data.dtype.names is not None:
# e.g. recarray
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
if isinstance(data[0], (list, tuple)):
content, columns = _list_to_arrays(data, columns)
elif isinstance(data[0], abc.Mapping):
content, columns = _list_of_dict_to_arrays(data, columns)
elif isinstance(data[0], ABCSeries):
content, columns = _list_of_series_to_arrays(data, columns)
else:
# last ditch effort
data = [tuple(x) for x in data]
content, columns = _list_to_arrays(data, columns)
content, columns = _finalize_columns_and_data(content, columns, dtype)
return content, columns
def _list_to_arrays(
data: List[Scalar],
columns: Union[Index, List],
) -> Tuple[List[Scalar], Union[Index, List[Axis]]]:
    # Note: we already check len(data) > 0 before getting here
if isinstance(data[0], tuple):
content = lib.to_object_array_tuples(data)
else:
# list of lists
content = lib.to_object_array(data)
return content, columns
def _list_of_series_to_arrays(
data: List,
columns: Union[Index, List],
) -> Tuple[List[Scalar], Union[Index, List[Axis]]]:
if columns is None:
# We know pass_data is non-empty because data[0] is a Series
pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
columns = get_objs_combined_axis(pass_data, sort=False)
indexer_cache: Dict[int, Scalar] = {}
aligned_values = []
for s in data:
index = getattr(s, "index", None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = extract_array(s, extract_numpy=True)
aligned_values.append( | algorithms.take_1d(values, indexer) | pandas.core.algorithms.take_1d |
import pandas as pd
import numpy as np
from cleaner import ReviewCleaner
from datetime import datetime
import numpy as np
import os
data_today = datetime.now().strftime("_%d_%m_%Y__%H_%M")
current_directory = os.getcwd()
class Preprocessor:
@staticmethod
def load(namefile='dump.csv', lista_colonne=['FRASE', 'CLASSE']):
reviews_df = pd.read_csv(os.path.join(current_directory, 'raw\\{}'.format(namefile)), header=0)
df = pd.DataFrame(columns=lista_colonne)
# get reviews
lista_recensioni = reviews_df.loc[:, 'text'].tolist()
lista_frasi = ReviewCleaner.to_sentence(lista_recensioni)
df['FRASE'] = lista_frasi
        # save the sentences to a new csv file
df_completo_senza_etichette = df.to_csv(os.path.join(current_directory, 'etichette\lista_frasi_stopwords'+data_today+'.csv'), index=False, encoding='utf-8-sig')
def clean(path=os.path.join(current_directory,'etichette\\')):
lista_csv_etichettati = os.listdir(path)
print(lista_csv_etichettati)
lista_df = []
for file in lista_csv_etichettati:
df_da_pulire = pd.read_csv(path+file, encoding="ISO-8859-1", header=0)
df_da_pulire.dropna(how='any', inplace=True)
lista_df.append(df_da_pulire)
result = pd.concat(lista_df)
# remove stopwords
result['FRASE'] = result['FRASE'].apply(ReviewCleaner.remove_stopwords)
result = result[(result['FRASE'] != '') & (result['CLASSE'] != '')]
result.dropna(how='any', inplace=True)
print(result.shape)
df_pulito = result.to_csv('df_pulito\df_etichettato'+data_today+'.csv', index=False, encoding='utf-8-sig')
def split(namefile='lista_frasi_stopwords.csv', n_rows=2000):
df = | pd.read_csv('etichette\\'+namefile, header=0) | pandas.read_csv |
import string
import numpy as np
from numpy.testing import assert_array_equal
from pandas import DataFrame, MultiIndex, Series
from shapely.geometry import LinearRing, LineString, MultiPoint, Point, Polygon
from shapely.geometry.collection import GeometryCollection
from shapely.ops import unary_union
from geopandas import GeoDataFrame, GeoSeries
from geopandas.base import GeoPandasBase
from geopandas.testing import assert_geodataframe_equal
from geopandas.tests.util import assert_geoseries_equal, geom_almost_equals, geom_equals
from geopandas import _compat as compat
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
def assert_array_dtype_equal(a, b, *args, **kwargs):
a = np.asanyarray(a)
b = np.asanyarray(b)
assert a.dtype == b.dtype
assert_array_equal(a, b, *args, **kwargs)
class TestGeomMethods:
def setup_method(self):
self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])
self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])
self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])
self.tz = Polygon([(1, 1, 1), (2, 2, 2), (3, 3, 3)])
self.tz1 = Polygon([(2, 2, 2), (1, 1, 1), (3, 3, 3)])
self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
self.sqz = Polygon([(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)])
self.t4 = Polygon([(0, 0), (3, 0), (3, 3), (0, 2)])
self.t5 = Polygon([(2, 0), (3, 0), (3, 3), (2, 3)])
self.inner_sq = Polygon(
[(0.25, 0.25), (0.75, 0.25), (0.75, 0.75), (0.25, 0.75)]
)
self.nested_squares = Polygon(self.sq.boundary, [self.inner_sq.boundary])
self.p0 = Point(5, 5)
self.p3d = Point(5, 5, 5)
self.g0 = GeoSeries(
[
self.t1,
self.t2,
self.sq,
self.inner_sq,
self.nested_squares,
self.p0,
None,
]
)
self.g1 = GeoSeries([self.t1, self.sq])
self.g2 = GeoSeries([self.sq, self.t1])
self.g3 = GeoSeries([self.t1, self.t2])
self.gz = GeoSeries([self.tz, self.sqz, self.tz1])
self.g3.crs = "epsg:4326"
self.g4 = GeoSeries([self.t2, self.t1])
self.g4.crs = "epsg:4326"
self.g_3d = GeoSeries([self.p0, self.p3d])
self.na = GeoSeries([self.t1, self.t2, Polygon()])
self.na_none = GeoSeries([self.t1, None])
self.a1 = self.g1.copy()
self.a1.index = ["A", "B"]
self.a2 = self.g2.copy()
self.a2.index = ["B", "C"]
self.esb = Point(-73.9847, 40.7484, 30.3244)
self.sol = Point(-74.0446, 40.6893, 31.2344)
self.landmarks = GeoSeries([self.esb, self.sol], crs="epsg:4326")
self.pt2d = Point(-73.9847, 40.7484)
self.landmarks_mixed = GeoSeries([self.esb, self.sol, self.pt2d], crs=4326)
self.l1 = LineString([(0, 0), (0, 1), (1, 1)])
self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])
self.g5 = GeoSeries([self.l1, self.l2])
self.g6 = GeoSeries([self.p0, self.t3])
self.g7 = GeoSeries([self.sq, self.t4])
self.g8 = GeoSeries([self.t1, self.t5])
self.empty = GeoSeries([])
self.all_none = GeoSeries([None, None])
self.empty_poly = Polygon()
self.g9 = GeoSeries(self.g0, index=range(1, 8))
# Crossed lines
self.l3 = LineString([(0, 0), (1, 1)])
self.l4 = LineString([(0, 1), (1, 0)])
self.crossed_lines = GeoSeries([self.l3, self.l4])
# Placeholder for testing, will just drop in different geometries
# when needed
self.gdf1 = GeoDataFrame(
{"geometry": self.g1, "col0": [1.0, 2.0], "col1": ["geo", "pandas"]}
)
self.gdf2 = GeoDataFrame(
{"geometry": self.g1, "col3": [4, 5], "col4": ["rand", "string"]}
)
self.gdf3 = GeoDataFrame(
{"geometry": self.g3, "col3": [4, 5], "col4": ["rand", "string"]}
)
self.gdfz = GeoDataFrame(
{"geometry": self.gz, "col3": [4, 5, 6], "col4": ["rand", "string", "geo"]}
)
def _test_unary_real(self, op, expected, a):
""" Tests for 'area', 'length', 'is_valid', etc. """
fcmp = assert_series_equal
self._test_unary(op, expected, a, fcmp)
def _test_unary_topological(self, op, expected, a):
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert a.equals(b)
self._test_unary(op, expected, a, fcmp)
def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):
""" Tests for 'intersection', 'union', 'symmetric_difference', etc. """
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert geom_equals(a, b)
if isinstance(b, GeoPandasBase):
right_df = True
else:
right_df = False
self._binary_op_test(op, expected, a, b, fcmp, True, right_df, *args, **kwargs)
def _test_binary_real(self, op, expected, a, b, *args, **kwargs):
fcmp = assert_series_equal
self._binary_op_test(op, expected, a, b, fcmp, True, False, *args, **kwargs)
def _test_binary_operator(self, op, expected, a, b):
"""
The operators only have GeoSeries on the left, but can have
GeoSeries or GeoDataFrame on the right.
If GeoDataFrame is on the left, geometry column is used.
"""
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert geom_equals(a, b)
if isinstance(b, GeoPandasBase):
right_df = True
else:
right_df = False
self._binary_op_test(op, expected, a, b, fcmp, False, right_df)
def _binary_op_test(
self, op, expected, left, right, fcmp, left_df, right_df, *args, **kwargs
):
"""
This is a helper to call a function on GeoSeries and GeoDataFrame
arguments. For example, 'intersection' is a member of both GeoSeries
and GeoDataFrame and can take either GeoSeries or GeoDataFrame inputs.
This function has the ability to test all four combinations of input
types.
Parameters
----------
        op : str
The operation to be tested. e.g., 'intersection'
left: GeoSeries
right: GeoSeries
fcmp: function
Called with the result of the operation and expected. It should
assert if the result is incorrect
left_df: bool
            Indicates whether the left input should also be called with a GeoDataFrame
right_df: bool
Indicates whether the right input should be called with a
GeoDataFrame
"""
def _make_gdf(s):
n = len(s)
col1 = string.ascii_lowercase[:n]
col2 = range(n)
return GeoDataFrame(
{"geometry": s.values, "col1": col1, "col2": col2},
index=s.index,
crs=s.crs,
)
# Test GeoSeries.op(GeoSeries)
result = getattr(left, op)(right, *args, **kwargs)
fcmp(result, expected)
if left_df:
# Test GeoDataFrame.op(GeoSeries)
gdf_left = _make_gdf(left)
result = getattr(gdf_left, op)(right, *args, **kwargs)
fcmp(result, expected)
if right_df:
# Test GeoSeries.op(GeoDataFrame)
gdf_right = _make_gdf(right)
result = getattr(left, op)(gdf_right, *args, **kwargs)
fcmp(result, expected)
if left_df:
# Test GeoDataFrame.op(GeoDataFrame)
result = getattr(gdf_left, op)(gdf_right, *args, **kwargs)
fcmp(result, expected)
def _test_unary(self, op, expected, a, fcmp):
# GeoSeries, (GeoSeries or geometry)
result = getattr(a, op)
fcmp(result, expected)
# GeoDataFrame, (GeoSeries or geometry)
gdf = self.gdf1.set_geometry(a)
result = getattr(gdf, op)
fcmp(result, expected)
# TODO reenable for all operations once we use pyproj > 2
# def test_crs_warning(self):
# # operations on geometries should warn for different CRS
# no_crs_g3 = self.g3.copy()
# no_crs_g3.crs = None
# with pytest.warns(UserWarning):
# self._test_binary_topological('intersection', self.g3,
# self.g3, no_crs_g3)
def test_intersection(self):
self._test_binary_topological("intersection", self.t1, self.g1, self.g2)
with pytest.warns(UserWarning, match="The indices .+ different"):
self._test_binary_topological(
"intersection", self.all_none, self.g1, self.empty
)
        assert len(self.g0.intersection(self.g9, align=True)) == 8
        assert len(self.g0.intersection(self.g9, align=False)) == 7
def test_union_series(self):
self._test_binary_topological("union", self.sq, self.g1, self.g2)
        assert len(self.g0.union(self.g9, align=True)) == 8
        assert len(self.g0.union(self.g9, align=False)) == 7
def test_union_polygon(self):
self._test_binary_topological("union", self.sq, self.g1, self.t2)
def test_symmetric_difference_series(self):
self._test_binary_topological("symmetric_difference", self.sq, self.g3, self.g4)
        assert len(self.g0.symmetric_difference(self.g9, align=True)) == 8
        assert len(self.g0.symmetric_difference(self.g9, align=False)) == 7
def test_symmetric_difference_poly(self):
expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs)
self._test_binary_topological(
"symmetric_difference", expected, self.g3, self.t1
)
def test_difference_series(self):
expected = GeoSeries([GeometryCollection(), self.t2])
self._test_binary_topological("difference", expected, self.g1, self.g2)
        assert len(self.g0.difference(self.g9, align=True)) == 8
        assert len(self.g0.difference(self.g9, align=False)) == 7
def test_difference_poly(self):
expected = GeoSeries([self.t1, self.t1])
self._test_binary_topological("difference", expected, self.g1, self.t2)
def test_geo_op_empty_result(self):
l1 = LineString([(0, 0), (1, 1)])
l2 = LineString([(2, 2), (3, 3)])
expected = GeoSeries([GeometryCollection()])
# binary geo resulting in empty geometry
result = GeoSeries([l1]).intersection(l2)
assert_geoseries_equal(result, expected)
# binary geo empty result with right GeoSeries
result = GeoSeries([l1]).intersection(GeoSeries([l2]))
assert_geoseries_equal(result, expected)
        # unary geo resulting in empty geometry
result = GeoSeries([GeometryCollection()]).convex_hull
assert_geoseries_equal(result, expected)
def test_boundary(self):
l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)])
l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs)
self._test_unary_topological("boundary", expected, self.g1)
def test_area(self):
expected = Series(np.array([0.5, 1.0]), index=self.g1.index)
self._test_unary_real("area", expected, self.g1)
expected = Series(np.array([0.5, np.nan]), index=self.na_none.index)
self._test_unary_real("area", expected, self.na_none)
def test_area_crs_warn(self):
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
self.g4.area
def test_bounds(self):
# Set columns to get the order right
expected = DataFrame(
{
"minx": [0.0, 0.0],
"miny": [0.0, 0.0],
"maxx": [1.0, 1.0],
"maxy": [1.0, 1.0],
},
index=self.g1.index,
columns=["minx", "miny", "maxx", "maxy"],
)
result = self.g1.bounds
assert_frame_equal(expected, result)
gdf = self.gdf1.set_geometry(self.g1)
result = gdf.bounds
assert_frame_equal(expected, result)
def test_bounds_empty(self):
# test bounds of empty GeoSeries
# https://github.com/geopandas/geopandas/issues/1195
s = GeoSeries([])
result = s.bounds
expected = DataFrame(
columns=["minx", "miny", "maxx", "maxy"], index=s.index, dtype="float64"
)
assert_frame_equal(result, expected)
def test_unary_union(self):
p1 = self.t1
p2 = Polygon([(2, 0), (3, 0), (3, 1)])
expected = unary_union([p1, p2])
g = GeoSeries([p1, p2])
self._test_unary_topological("unary_union", expected, g)
def test_contains(self):
expected = [True, False, True, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.contains(self.t1))
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.contains(self.g9, align=True))
expected = [False, False, True, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.contains(self.g9, align=False))
def test_length(self):
expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index)
self._test_unary_real("length", expected, self.g1)
expected = Series(np.array([2 + np.sqrt(2), np.nan]), index=self.na_none.index)
self._test_unary_real("length", expected, self.na_none)
def test_length_crs_warn(self):
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
self.g4.length
def test_crosses(self):
expected = [False, False, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.crosses(self.t1))
expected = [False, True]
assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3))
expected = [False] * 8
assert_array_dtype_equal(expected, self.g0.crosses(self.g9, align=True))
expected = [False] * 7
assert_array_dtype_equal(expected, self.g0.crosses(self.g9, align=False))
def test_disjoint(self):
expected = [False, False, False, False, False, True, False]
assert_array_dtype_equal(expected, self.g0.disjoint(self.t1))
expected = [False] * 8
assert_array_dtype_equal(expected, self.g0.disjoint(self.g9, align=True))
expected = [False, False, False, False, True, False, False]
assert_array_dtype_equal(expected, self.g0.disjoint(self.g9, align=False))
def test_relate(self):
expected = Series(
[
"212101212",
"212101212",
"212FF1FF2",
"2FFF1FFF2",
"FF2F112F2",
"FF0FFF212",
None,
],
index=self.g0.index,
)
assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq))
expected = Series(["FF0FFF212", None], index=self.g6.index)
assert_array_dtype_equal(expected, self.g6.relate(self.na_none))
expected = Series(
[
None,
"2FFF1FFF2",
"2FFF1FFF2",
"2FFF1FFF2",
"2FFF1FFF2",
"0FFFFFFF2",
None,
None,
],
index=range(8),
)
assert_array_dtype_equal(expected, self.g0.relate(self.g9, align=True))
expected = Series(
[
"FF2F11212",
"2FF11F212",
"212FF1FF2",
"FF2F1F212",
"FF2FF10F2",
None,
None,
],
index=self.g0.index,
)
assert_array_dtype_equal(expected, self.g0.relate(self.g9, align=False))
def test_distance(self):
expected = Series(
np.array([np.sqrt((5 - 1) ** 2 + (5 - 1) ** 2), np.nan]), self.na_none.index
)
assert_array_dtype_equal(expected, self.na_none.distance(self.p0))
expected = Series(np.array([np.sqrt(4 ** 2 + 4 ** 2), np.nan]), self.g6.index)
assert_array_dtype_equal(expected, self.g6.distance(self.na_none))
expected = Series(np.array([np.nan, 0, 0, 0, 0, 0, np.nan, np.nan]), range(8))
assert_array_dtype_equal(expected, self.g0.distance(self.g9, align=True))
val = self.g0.iloc[4].distance(self.g9.iloc[4])
expected = Series(np.array([0, 0, 0, 0, val, np.nan, np.nan]), self.g0.index)
assert_array_dtype_equal(expected, self.g0.distance(self.g9, align=False))
def test_distance_crs_warning(self):
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
self.g4.distance(self.p0)
def test_intersects(self):
expected = [True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.intersects(self.t1))
expected = [True, False]
assert_array_dtype_equal(expected, self.na_none.intersects(self.t2))
expected = np.array([], dtype=bool)
assert_array_dtype_equal(expected, self.empty.intersects(self.t1))
expected = np.array([], dtype=bool)
assert_array_dtype_equal(expected, self.empty.intersects(self.empty_poly))
expected = [False] * 7
assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly))
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.intersects(self.g9, align=True))
expected = [True, True, True, True, False, False, False]
assert_array_dtype_equal(expected, self.g0.intersects(self.g9, align=False))
def test_overlaps(self):
expected = [True, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq))
expected = [False, False]
assert_array_dtype_equal(expected, self.g4.overlaps(self.t1))
expected = [False] * 8
assert_array_dtype_equal(expected, self.g0.overlaps(self.g9, align=True))
expected = [False] * 7
assert_array_dtype_equal(expected, self.g0.overlaps(self.g9, align=False))
def test_touches(self):
expected = [False, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.touches(self.t1))
expected = [False] * 8
assert_array_dtype_equal(expected, self.g0.touches(self.g9, align=True))
expected = [True, False, False, True, False, False, False]
assert_array_dtype_equal(expected, self.g0.touches(self.g9, align=False))
def test_within(self):
expected = [True, False, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.t1))
expected = [True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.sq))
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.g9, align=True))
expected = [False, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.g9, align=False))
def test_covers_itself(self):
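# covers() is True when every point of the other geometry lies on or inside this one;
# unlike contains(), no point has to fall strictly in the interior, so a polygon covers its own boundary.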
# Each polygon in a Series covers itself
res = self.g1.covers(self.g1)
exp = Series([True, True])
assert_series_equal(res, exp)
def test_covers(self):
res = self.g7.covers(self.g8)
exp = Series([True, False])
assert_series_equal(res, exp)
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.covers(self.g9, align=True))
expected = [False, False, True, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.covers(self.g9, align=False))
def test_covers_inverse(self):
res = self.g8.covers(self.g7)
exp = Series([False, False])
assert_series_equal(res, exp)
@pytest.mark.skipif(
not compat.USE_PYGEOS,
reason="covered_by is only implemented for pygeos, not shapely",
)
def test_covered_by(self):
res = self.g1.covered_by(self.g1)
exp = Series([True, True])
assert_series_equal(res, exp)
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.covered_by(self.g9, align=True))
expected = [False, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.covered_by(self.g9, align=False))
def test_is_valid(self):
expected = Series(np.array([True] * len(self.g1)), self.g1.index)
self._test_unary_real("is_valid", expected, self.g1)
def test_is_empty(self):
expected = Series(np.array([False] * len(self.g1)), self.g1.index)
self._test_unary_real("is_empty", expected, self.g1)
def test_is_ring(self):
expected = Series(np.array([True] * len(self.g1)), self.g1.index)
self._test_unary_real("is_ring", expected, self.g1)
def test_is_simple(self):
expected = Series(np.array([True] * len(self.g1)), self.g1.index)
self._test_unary_real("is_simple", expected, self.g1)
def test_has_z(self):
expected = Series([False, True], self.g_3d.index)
self._test_unary_real("has_z", expected, self.g_3d)
def test_xyz_points(self):
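# The x/y/z accessors are only defined for point geometries; z returns NaN for points without a z coordinate.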
expected_x = [-73.9847, -74.0446]
expected_y = [40.7484, 40.6893]
expected_z = [30.3244, 31.2344]
assert_array_dtype_equal(expected_x, self.landmarks.geometry.x)
assert_array_dtype_equal(expected_y, self.landmarks.geometry.y)
assert_array_dtype_equal(expected_z, self.landmarks.geometry.z)
# mixed dimensions
expected_z = [30.3244, 31.2344, np.nan]
assert_array_dtype_equal(expected_z, self.landmarks_mixed.geometry.z)
def test_xyz_polygons(self):
# accessing x attribute in polygon geoseries should raise an error
with pytest.raises(ValueError):
_ = self.gdf1.geometry.x
# and same for accessing y attribute in polygon geoseries
with pytest.raises(ValueError):
_ = self.gdf1.geometry.y
# and same for accessing z attribute in polygon geoseries
with pytest.raises(ValueError):
_ = self.gdfz.geometry.z
def test_centroid(self):
polygon = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)])
point = Point(0, 0)
polygons = GeoSeries([polygon for i in range(3)])
points = GeoSeries([point for i in range(3)])
assert_geoseries_equal(polygons.centroid, points)
def test_centroid_crs_warn(self):
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
self.g4.centroid
def test_convex_hull(self):
# the convex hull of a square should be the same as the square
squares = GeoSeries([self.sq for i in range(3)])
assert_geoseries_equal(squares, squares.convex_hull)
def test_exterior(self):
exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3])
for expected, computed in zip(exp_exterior, self.g3.exterior):
assert computed.equals(expected)
def test_interiors(self):
original = GeoSeries([self.t1, self.nested_squares])
# This is a polygon with no interior.
expected = []
assert original.interiors[0] == expected
# This is a polygon with an interior.
expected = LinearRing(self.inner_sq.boundary)
assert original.interiors[1][0].equals(expected)
def test_interpolate(self):
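# interpolate(distance) returns the point located at the given distance along each line;
# with normalized=True the distance is interpreted as a fraction of the line's length.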
expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)])
self._test_binary_topological(
"interpolate", expected, self.g5, 0.75, normalized=True
)
expected = GeoSeries([Point(0.5, 1.0), Point(1.0, 0.5)])
self._test_binary_topological("interpolate", expected, self.g5, 1.5)
def test_interpolate_distance_array(self):
expected = GeoSeries([Point(0.0, 0.75), Point(1.0, 0.5)])
self._test_binary_topological(
"interpolate", expected, self.g5, np.array([0.75, 1.5])
)
expected = GeoSeries([Point(0.5, 1.0), Point(0.0, 1.0)])
self._test_binary_topological(
"interpolate", expected, self.g5, np.array([0.75, 1.5]), normalized=True
)
def test_interpolate_distance_wrong_length(self):
distances = np.array([1, 2, 3])
with pytest.raises(ValueError):
self.g5.interpolate(distances)
def test_interpolate_distance_wrong_index(self):
distances = Series([1, 2], index=[99, 98])
with pytest.raises(ValueError):
self.g5.interpolate(distances)
def test_interpolate_crs_warning(self):
g5_crs = self.g5.copy()
g5_crs.crs = 4326
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
g5_crs.interpolate(1)
def test_project(self):
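# project() is the inverse of interpolate(): it gives the distance along each line to the point
# nearest the supplied geometry; normalized=True expresses that distance as a fraction of the line's length.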
expected = Series([2.0, 1.5], index=self.g5.index)
p = Point(1.0, 0.5)
self._test_binary_real("project", expected, self.g5, p)
expected = Series([1.0, 0.5], index=self.g5.index)
self._test_binary_real("project", expected, self.g5, p, normalized=True)
s = GeoSeries([Point(2, 2), Point(0.5, 0.5)], index=[1, 2])
expected = Series([np.nan, 2.0, np.nan])
assert_series_equal(self.g5.project(s), expected)
expected = Series([2.0, 0.5], index=self.g5.index)
assert_series_equal(self.g5.project(s, align=False), expected)
def test_affine_transform(self):
# 45 degree reflection matrix
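# shapely's 2D affine matrix is [a, b, d, e, xoff, yoff]: x' = a*x + b*y + xoff, y' = d*x + e*y + yoff,
# so [0, 1, 1, 0, 0, 0] swaps x and y, i.e. reflects across the line y = x.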
matrix = [0, 1, 1, 0, 0, 0]
expected = self.g4
res = self.g3.affine_transform(matrix)
assert_geoseries_equal(expected, res)
def test_translate_tuple(self):
trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y
assert self.landmarks.translate(*trans)[0].equals(self.sol)
res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0]
assert res.equals(self.sol)
def test_rotate(self):
angle = 98
expected = self.g4
o = Point(0, 0)
res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o)
assert geom_almost_equals(self.g4, res)
res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0))
assert geom_almost_equals(expected, res.rotate(-angle, origin=o))
def test_scale(self):
expected = self.g4
scale = 2.0, 1.0
inv = tuple(1.0 / i for i in scale)
o = Point(0, 0)
res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o)
assert geom_almost_equals(expected, res)
res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o)
res = res.scale(*inv, origin=o)
assert geom_almost_equals(expected, res)
def test_skew(self):
expected = self.g4
skew = 45.0
o = Point(0, 0)
# Test xs
res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o)
assert geom_almost_equals(expected, res)
res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o)
res = res.skew(xs=-skew, origin=o)
assert geom_almost_equals(expected, res)
# Test ys
res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o)
assert geom_almost_equals(expected, res)
res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o)
res = res.skew(ys=-skew, origin=o)
assert geom_almost_equals(expected, res)
def test_buffer(self):
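# buffer's resolution is the number of segments per quarter circle, so resolution=1
# approximates the circle with a 4-sided diamond of "radius" 5.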
original = GeoSeries([Point(0, 0)])
expected = GeoSeries([Polygon(((5, 0), (0, -5), (-5, 0), (0, 5), (5, 0)))])
calculated = original.buffer(5, resolution=1)
assert geom_almost_equals(expected, calculated)
def test_buffer_args(self):
args = dict(cap_style=3, join_style=2, mitre_limit=2.5)
calculated_series = self.g0.buffer(10, **args)
for original, calculated in zip(self.g0, calculated_series):
if original is None:
assert calculated is None
else:
expected = original.buffer(10, **args)
assert calculated.equals(expected)
def test_buffer_distance_array(self):
original = GeoSeries([self.p0, self.p0])
expected = GeoSeries(
[
Polygon(((6, 5), (5, 4), (4, 5), (5, 6), (6, 5))),
Polygon(((10, 5), (5, 0), (0, 5), (5, 10), (10, 5))),
]
)
calculated = original.buffer(np.array([1, 5]), resolution=1)
assert_geoseries_equal(calculated, expected, check_less_precise=True)
def test_buffer_distance_wrong_length(self):
original = GeoSeries([self.p0, self.p0])
distances = np.array([1, 2, 3])
with pytest.raises(ValueError):
original.buffer(distances)
def test_buffer_distance_wrong_index(self):
original = GeoSeries([self.p0, self.p0], index=[0, 1])
distances = Series(data=[1, 2], index=[99, 98])
with pytest.raises(ValueError):
original.buffer(distances)
def test_buffer_empty_none(self):
p = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
s = GeoSeries([p, GeometryCollection(), None])
result = s.buffer(0)
assert_geoseries_equal(result, s)
result = s.buffer(np.array([0, 0, 0]))
assert_geoseries_equal(result, s)
def test_buffer_crs_warn(self):
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
self.g4.buffer(1)
with pytest.warns(None) as record:
# do not warn for 0
self.g4.buffer(0)
for r in record:
assert "Geometry is in a geographic CRS." not in str(r.message)
def test_envelope(self):
e = self.g3.envelope
assert np.all(e.geom_equals(self.sq))
assert isinstance(e, GeoSeries)
assert self.g3.crs == e.crs
def test_total_bounds(self):
bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y
assert isinstance(self.landmarks.total_bounds, np.ndarray)
assert tuple(self.landmarks.total_bounds) == bbox
df = GeoDataFrame(
{"geometry": self.landmarks, "col1": range(len(self.landmarks))}
)
assert tuple(df.total_bounds) == bbox
def test_explode_geoseries(self):
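# explode() splits every multi-part geometry into one row per part and returns a
# MultiIndex of (original index, part position within the geometry).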
s = GeoSeries(
[MultiPoint([(0, 0), (1, 1)]), MultiPoint([(2, 2), (3, 3), (4, 4)])],
crs=4326,
)
s.index.name = "test_index_name"
expected_index_name = ["test_index_name", None]
index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)]
expected = GeoSeries(
[Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 3), Point(4, 4)],
index=MultiIndex.from_tuples(index, names=expected_index_name),
crs=4326,
)
assert_geoseries_equal(expected, s.explode())
@pytest.mark.parametrize("index_name", [None, "test"])
def test_explode_geodataframe(self, index_name):
s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)])
df = GeoDataFrame({"col": [1, 2], "geometry": s})
df.index.name = index_name
test_df = df.explode()
expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)])
expected_df = GeoDataFrame({"col": [1, 1, 2], "geometry": expected_s})
expected_index = MultiIndex(
[[0, 1], [0, 1]], # levels
[[0, 0, 1], [0, 1, 0]], # labels/codes
names=[index_name, None],
)
expected_df = expected_df.set_index(expected_index)
assert_frame_equal(test_df, expected_df)
@pytest.mark.parametrize("index_name", [None, "test"])
def test_explode_geodataframe_level_1(self, index_name):
# GH1393
s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)])
df = GeoDataFrame({"level_1": [1, 2], "geometry": s})
df.index.name = index_name
test_df = df.explode()
expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)])
expected_df = GeoDataFrame({"level_1": [1, 1, 2], "geometry": expected_s})
expected_index = MultiIndex(
[[0, 1], [0, 1]], # levels
[[0, 0, 1], [0, 1, 0]], # labels/codes
names=[index_name, None],
)
expected_df = expected_df.set_index(expected_index)
assert_frame_equal(test_df, expected_df)
@pytest.mark.skipif(
not compat.PANDAS_GE_025,
reason="pandas explode introduced in pandas 0.25",
)
def test_explode_pandas_fallback(self):
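# Exploding a non-geometry column falls back to pandas.DataFrame.explode: the list column
# is expanded while the multi-part geometries are simply repeated unchanged.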
d = {
"col1": [["name1", "name2"], ["name3", "name4"]],
"geometry": [
MultiPoint([(1, 2), (3, 4)]),
MultiPoint([(2, 1), (0, 0)]),
],
}
gdf = GeoDataFrame(d, crs=4326)
expected_df = GeoDataFrame(
{
"col1": ["name1", "name2", "name3", "name4"],
"geometry": [
MultiPoint([(1, 2), (3, 4)]),
MultiPoint([(1, 2), (3, 4)]),
MultiPoint([(2, 1), (0, 0)]),
MultiPoint([(2, 1), (0, 0)]),
],
},
index=[0, 0, 1, 1],
crs=4326,
)
# Test with column provided as arg
exploded_df = gdf.explode("col1")
assert_geodataframe_equal(exploded_df, expected_df)
# Test with column provided as kwarg
exploded_df = gdf.explode(column="col1")
assert_geodataframe_equal(exploded_df, expected_df)
@pytest.mark.skipif(
not compat.PANDAS_GE_11,
reason="ignore_index keyword introduced in pandas 1.1.0",
)
def test_explode_pandas_fallback_ignore_index(self):
d = {
"col1": [["name1", "name2"], ["name3", "name4"]],
"geometry": [
MultiPoint([(1, 2), (3, 4)]),
MultiPoint([(2, 1), (0, 0)]),
],
}
gdf = GeoDataFrame(d, crs=4326)
expected_df = GeoDataFrame(
{
"col1": ["name1", "name2", "name3", "name4"],
"geometry": [
MultiPoint([(1, 2), (3, 4)]),
MultiPoint([(1, 2), (3, 4)]),
MultiPoint([(2, 1), (0, 0)]),
MultiPoint([(2, 1), (0, 0)]),
],
},
crs=4326,
)
# Test with column provided as arg
exploded_df = gdf.explode("col1", ignore_index=True)
assert_geodataframe_equal(exploded_df, expected_df)
# Test with column provided as kwarg
exploded_df = gdf.explode(column="col1", ignore_index=True)
assert_geodataframe_equal(exploded_df, expected_df)
@pytest.mark.parametrize("outer_index", [1, (1, 2), "1"])
def test_explode_pandas_multi_index(self, outer_index):
index = MultiIndex.from_arrays(
[[outer_index, outer_index, outer_index], [1, 2, 3]],
names=("first", "second"),
)
df = GeoDataFrame(
{"vals": [1, 2, 3]},
geometry=[MultiPoint([(x, x), (x, 0)]) for x in range(3)],
index=index,
)
test_df = df.explode()
expected_s = GeoSeries(
[
Point(0, 0),
Point(0, 0),
Point(1, 1),
Point(1, 0),
Point(2, 2),
Point(2, 0),
]
)
expected_df = GeoDataFrame({"vals": [1, 1, 2, 2, 3, 3], "geometry": expected_s})
expected_index = MultiIndex.from_tuples(
[
(outer_index, *pair)
for pair in [(1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)]
],
names=["first", "second", None],
)
expected_df = expected_df.set_index(expected_index)
assert_frame_equal(test_df, expected_df) | pandas.testing.assert_frame_equal