prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---|
import requests
import pandas as pd
import html
from bs4 import BeautifulSoup
class DblpApi:
def __init__(self):
self.session = requests.Session()
self.author_url = 'http://dblp.org/search/author/api'
self.pub_url = 'http://dblp.org/search/publ/api'
def get_pub_list_by_url(self, url):
req = self.session.get(url)
soup = BeautifulSoup(req.content, 'html.parser')
pub_list = [article.get_text() for article in soup.select('span[class="title"]')]
return pub_list
def search_pub(self, pub_name):
params = {
'q': pub_name,
'format': 'json',
'h': 1000,
}
req = self.session.get(self.pub_url, params=params)
data = req.json()
print(data['result'])
def search_author(self, author_input):
# prepare the first query
params = {
'q': author_input,
'format': 'json',
'h': 1000,
}
req = self.session.get(self.author_url, params=params)
data = req.json()
if data['result']['status']['@code'] == '200':
# split the input author name
author_input_list = author_input.split(' ')
author_input_length = len(author_input_list)
# if the first query got no results and the name ends with a numeric identifier, remove the identifier and retry
if data['result']['hits']['@total'] == '0' and author_input_list[-1].isdigit():
author_input_length -= 1
author_input_list = author_input_list[:author_input_length]
params = {
'q': ' '.join(author_input_list),
'format': 'json',
'h': 1000,
}
req = self.session.get(self.author_url, params=params)
data = req.json()
author_identical = []
curr_counter = int(data['result']['hits']['@sent'])
while True:
# iterate through all results
for author in data['result']['hits']['hit']:
author_info = author['info']
unescaped_name = html.unescape(author_info['author'])
author_name_list = unescaped_name.split(' ')
found = False
# the case that the two names match exactly.
if author_input_list == author_name_list:
# author_identical.append((author_info['author'], author['@id'], author_info['url']))
found = True
# it's a duplicate name in the form of the exact name followed by a four-digit identifier.
elif author_input_length + 1 == len(author_name_list) and author_name_list[-1].isdigit():
if author_input_list == author_name_list[:author_input_length]:
# author_identical.append((author_info['author'], author['@id'], author_info['url']))
found = True
# # middle name case, doesn't work for Chinese names.
# elif len(author_name_list) == 3 and author_input_length == 2 and not author_name_list[-1].isdigit():
# if author_name_list[0] == author_name_list[0] and author_name_list[2] == author_name_list[2]:
# found = True
if found:
author_identical.append(
(author_info['author'], author['@id'], author_info['url'], author_input))
# the case that the author has name aliases
elif not found and 'aliases' in author_info:
alias = author_info['aliases']['alias']
# the author has one alias, and it matches the name exactly
if isinstance(alias, str) and html.unescape(alias) == author_input:
author_identical.append(
(author_info['author'], author['@id'], author_info['url'], author_input))
# the author has a list of aliases
elif isinstance(alias, list):
for a in alias:
a_list = html.unescape(a).split(' ')
if a_list == author_input_list:
author_identical.append(
(author_info['author'], author['@id'], author_info['url'], author_input))
elif author_input_length + 1 == len(a_list) and a_list[-1].isdigit():
if author_input_list == a_list[:author_input_length]:
author_identical.append(
(author_info['author'], author['@id'], author_info['url'], author_input))
if curr_counter < int(data['result']['hits']['@total']):
params = {
'q': author_input,
'format': 'json',
'h': 1000,
'f': curr_counter,
}
req = self.session.get(self.author_url, params=params)
data = req.json()
if data['result']['hits']['@sent'] == '0':
break
curr_counter += int(data['result']['hits']['@sent'])
else:
break
return author_identical
if __name__ == '__main__':
dblp = DblpApi()
df_authors = pd.read_pickle('authors.pkl')
# df_bad = pd.read_pickle('bad.pkl')
#
# df_bad.rename({0: 'author'}, inplace=True, axis=1)
#
# counter = 0
# for row in df_bad.iterrows():
# # print(df_authors[df_authors['author'] == row[1]['author']])
# x = dblp.search_author(row[1]['author'])
# if len(x) == 0:
# counter += 1
# print(df_authors[df_authors['author'] == row[1]['author']])
#
# print(counter)
# df_article = pd.read_pickle('dblp_article_multi_author.pkl')
# print(df_article['title'].iloc[0])
# dblp.search_pub('Object Data Model Facilities for Multimedia Data Types.')
# print(df_authors['author'][15623])
# n = '<NAME>'
#
# x = dblp.search_author(n)
#
# print(x)
#
# req = dblp.session.get(x[0][2])
#
# soup = BeautifulSoup(req.content, 'html.parser')
#
# for y in soup.select('span[class="title"]'):
# print(y.get_text())
#
# pub_author_map = [(article.get_text(), x[0][2]) for article in soup.select('span[class="title"]')]
#
# print(pub_author_map)
# for x in soup.select('#publ-section > div:nth-child(2) > div > ul'):
# print(x)
# for decades in soup.find_all('ul', {'class': 'publ-list'}):
# for decade in decades:
# for year in decade:
# print(year)
# print('----------------------------------')
# print('111111111')
upper_bound = 100000
author_found = []
author_not_found = []
author_bad_format = []
for name in df_authors['author'].tolist()[:upper_bound]:
try:
identical_authors = dblp.search_author(name)
if len(identical_authors) == 0:
author_not_found.append(name)
else:
author_found.extend(identical_authors)
except Exception:
print(f'name {name} does not work.')
author_bad_format.append(name)
print(f'There are {len(author_not_found)} authors that could not be found.')
print(author_not_found)
df_not_found = pd.DataFrame(author_not_found)
df_not_found.to_pickle('author_not_found.pkl')
df_found = pd.DataFrame(author_found)
df_found.to_pickle('author_found.pkl')
df_bad_format = pd.DataFrame(author_bad_format)
'''
Training the pair trading system by calculating correlations
between all combinations of stock pairs.
'''
import os
import time
from multiprocessing import Process, Queue
import pandas as pd
import calc
from util import *
# Control the metrics to calculate. Must match the names defined in "calc.py"
METRICS = ["CoInt", "PCC_log", "SSD_SMA3"]
def generate_jobs(stock_data_folder):
'''
Generate all the jobs.
Each job has an id and two stock codes to represent a pair.
'''
stock_codes = [get_stock_code(fname) for fname in os.listdir(stock_data_folder)]
stock_codes.sort()
num_stocks = len(stock_codes)
jobs = []
job_index = 0
for i in range(num_stocks):
for j in range(i + 1, num_stocks):
jobs.append((str(job_index), stock_codes[i], stock_codes[j]))
job_index += 1
return jobs
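# For illustration (hypothetical stock codes): with ['0001', '0002', '0003'] the list above
# would be [('0', '0001', '0002'), ('1', '0001', '0003'), ('2', '0002', '0003')] --
# one job per unordered pair, each tagged with a string job id.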
def generate_new_output_file(output_folder):
'''
Generate a new output file. Return the filename (with path).
'''
num_prev_runs = 0
while os.path.isfile(os.path.join(output_folder, "out_%s.csv" % num_prev_runs)):
num_prev_runs += 1
fname = os.path.join(output_folder, "out_%s.csv" % num_prev_runs)
create_dir_and_file(fname)
return fname
def do_worker(q_in, q_out):
'''
Do all kinds of calculations for a pair of stocks.
'''
while True:
job = q_in.get()
if job is None:
# Mark the end of queue
q_out.put(None)
break
job_id, stock_x, stock_y, df_stock_x, df_stock_y = job
result_df = pd.DataFrame()
result_df['index'] = [job_id]
result_df['Stock_1'] = [stock_x]
result_df['Stock_2'] = [stock_y]
for metric in METRICS:
calc_method = getattr(calc, "calc_" + metric)
metric_res = calc_method(df_stock_x, df_stock_y)
if type(metric_res) is dict:
for sub_metric, sub_val in metric_res.items():
result_df[metric + '_' + sub_metric] = sub_val
elif type(metric_res) is list:
for i, sub_val in enumerate(metric_res):
result_df[metric + '_' + str(i)] = sub_val
else:
result_df[metric] = metric_res
done_job = [job_id, stock_x, stock_y, result_df]
q_out.put(done_job)
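# A minimal wiring sketch, as comments only (the script's actual entry point is not shown in
# this excerpt, so the names below are illustrative):
#   q_in, q_out = Queue(), Queue()
#   workers = [Process(target=do_worker, args=(q_in, q_out)) for _ in range(num_workers)]
#   io_proc = Process(target=do_io, args=(q_out, num_workers, len(jobs)))
#   # enqueue each job (with its two loaded price DataFrames), then one None sentinel per
#   # worker so that every do_worker loop exits and do_io eventually sees num_workers Nones.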
def do_io(q_out, num_workers, total_num_jobs):
'''
Do the I/O periodically.
'''
config = load_config()
output_folder = config['TRAINING_OUTPUT_FOLDER']
log_file = config['LOG_FILE']
output_df = pd.DataFrame()
output_filename = generate_new_output_file(output_folder)
update_interval = 1000 # File I/O for every this many jobs done
num_jobs_completed = 0
start_time = time.time()
while True:
job = q_out.get()
if job is None:
num_workers -= 1
if num_workers == 0:
break
else:
continue
result_df = job[3]
output_df = pd.concat([output_df, result_df])
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import re
import os, sys
import argparse
from collections import defaultdict
sys.path.append('..')
import settings
import utils
import export_metadata
# def get_record_ids_from_fpaths(ser_fpaths):
# """
# using the df from the calculated features csv
# add a column with record ids
# return df
# """
# ser = ser_fpaths.str.rsplit(pat="/", n=1, expand=True)[1]
# ser = ser.str.split(pat=".", n=1, expand=True)[0]
# ser = ser.astype(int)
# ser.name = 'record_id'
# ser.index.rename("model_id", inplace=True)
# return ser
def create_class_dict(ser):
# use a default dict to return -1 in case key is not found
class_dict = defaultdict(lambda: -1)
classes = np.unique(ser)
indices = np.arange(1,len(classes)+1)
class_dict.update(dict(zip(classes, indices)))
return classes, indices, class_dict
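# Example (hypothetical values): create_class_dict(pd.Series(['map', 'report', 'map'])) returns
# classes ['map', 'report'], indices [1, 2], and a dict {'map': 1, 'report': 2} whose
# defaultdict factory yields -1 for any unseen key.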
def flatten_series_of_lists(ser:pd.Series):
"""
flatten a series of lists where the relationship between the index and the values
needs to be maintained
ser: pd.Series where the values are lists
"""
indices = []
keys = []
for index, row in ser.items():
if type(row) != list:
pass
else:
for key in row:
indices.append(index)
keys.append(key)
return pd.Series(index=indices, data=keys, name=ser.name)
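# Example (hypothetical values): a series {0: ['a', 'b'], 1: ['c']} flattens to a series with
# index [0, 0, 1] and values ['a', 'b', 'c']; rows whose value is not a list are skipped and
# the original series name is preserved.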
def has_numbers(inputString):
return bool(re.search(r'\d', inputString))
def split_and_flatten_series(ser, split_char=None):
ser = ser.str.lower()
# split into series of lists strings based split_char
if split_char:
ser = ser.str.split(split_char)
# flatten series of lists
ser = flatten_series_of_lists(ser)
# strip whitespace from strings
if ser.dtype == "O":
ser = ser.str.strip()
return ser
def series_of_lists_to_array(ser, fill_value_index=None):
"""
transform a series of lists (like returned by str.split()) into an array.
By default, lists with differing lengths will be padded with None or
fill_value_index can be used to select an element from each list to pad with.
e.g. fill_value_index=-1 will pad the array to the right with the last element from each list
modified from https://stackoverflow.com/questions/10346336/list-of-lists-into-numpy-array
"""
x=ser.values
length=(np.vectorize(len)(x)).max()
if fill_value_index==None:
y=np.array([xi+[None]*(length-len(xi)) for xi in x])
if type(fill_value_index)==int:
y=np.array([xi+[xi[fill_value_index]]*(length-len(xi)) for xi in x])
return y
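# Example (hypothetical values): [['1990'], ['1990', '1995']] with fill_value_index=-1 becomes
# [['1990', '1990'], ['1990', '1995']] -- short rows are right-padded with their own last
# element, which is what the year extraction below relies on before casting to int.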
def series_of_string_years_to_min_max_years(ser):
"""
get min and max years from date column
assumes the earliest and latest years (in digits) in the string
are the earlist and latest years
ser:pd.Series, date column from the metadata dataframe
returns two pandas series: min_years, max_years
"""
# ser_index = ser.index
fltr = ser.isna()
ser = ser.loc[~fltr]
print("number of rows that were missing values: {:,}".format(fltr.sum()))
# assumes that all years are after 99 A.D.
ptrn = re.compile(r"[0-9]{3,4}")
years = ser.apply(ptrn.findall)
years = series_of_lists_to_array(years, fill_value_index=-1)
years = years.astype(int)
min_years = np.min(years, axis=1)
max_years = np.max(years, axis=1)
return min_years, max_years, ser.index
def insert_min_max_years_into_df(df_meta, min_years, max_years, index):
df_meta.loc[index,"year_min"] = min_years
df_meta.loc[index,"year_max"] = max_years
# convert from floats to pandas ints
df_meta.loc[:,["year_min","year_max"]] = df_meta.loc[:,["year_min","year_max"]].astype("Int64")
#fill in NaN's with -1 to still use numpy int comparisons
fltr = df_meta[["year_min","year_max"]].isna().any(axis=1)
df_meta.loc[fltr,["year_min","year_max"]] = -1
return df_meta
def insert_years_from_text(df):
ser = df["date"].dropna()
min_years, max_years, index = series_of_string_years_to_min_max_years(ser)
df = insert_min_max_years_into_df(df, min_years, max_years, index)
return df
def make_flat_relationships_table(df_meta):
"""
flatten semi-colon separated lists of relationships between works
"""
tser = split_and_flatten_series(df_meta["relations"], split_char=";")
tdf = tser.str.split(",", n=1, expand=True)
tdf = tdf.rename(columns={0:"relationship_type",1:"inventory_number"})
fltr = tdf["relationship_type"].notna() & tdf["inventory_number"].isna() & tdf["relationship_type"].apply(has_numbers)
tdf.loc[fltr,"inventory_number"] = tdf.loc[fltr, "relationship_type"]
tdf.loc[fltr,"relationship_type"] = "undefined"
tdf["relationship_type"] = tdf["relationship_type"].str.replace("doublette","dublette")
tdf["inventory_number"]= tdf["inventory_number"].str.strip()
return tdf
def make_df_of_relationship_types(ser):
"""
ser: pd.Series, column of all relationship types (non unique)
return dataframe with unique values
"""
# filter out na and undefined
fltr = ser != "undefined"
ser = ser.loc[fltr]
classes, indices, class_dict = create_class_dict(ser)
df_relationship = pd.DataFrame(data={"name":classes}, index=indices)
return df_relationship
def nest_relationship_type_ids(df_rel_types, df_relationships):
"""
convert the flat table of relationship types as text, into lists of ids
df_rel_types: pd.DataFrame, the table of unique relationship types. used to create the id dictionary
df_relationships: pd.DataFrame, the flat table of relationship types as text
"""
# map ids into the flat list of relationship types
rel_types_dict = dict(zip(df_rel_types['name'].to_list(), df_rel_types.index.to_list()))
df_relationships['relationship_type_id'] = df_relationships['relationship_type'].map(rel_types_dict)
# create a series with lists of relationship ids (for Django manytomany field)
df_rel_types_list = df_relationships[['relationship_type_id']].sort_index()
df_rel_types_list = df_rel_types_list.reset_index().rename(columns={"index":"record_id"})
df_rel_types_list = df_rel_types_list.drop_duplicates()
df_rel_types_list = df_rel_types_list.groupby('record_id').agg({"relationship_type_id":lambda x: x.tolist()})
return df_rel_types_list
def nest_class_ids(class_dict, ser_to_encode):
"""
convert the flat table of relationship types as text, into lists of ids
df_rel_types: pd.DataFrame, the table of unique relationship types. used to create the id dictionary
df_relationships: pd.DataFrame, the flat table of relationship types as text
returns: pd.DataFrame
"""
ser_name = ser_to_encode.name
# map ids into the flat list of relationship types
ser = ser_to_encode.map(class_dict)
# create a series with lists of relationship ids (for Django manytomany field)
tdf = ser.sort_index().reset_index().rename(columns={"index":"record_id"})
tdf = tdf.drop_duplicates()
# nest encoded ids
tdf = tdf.groupby('record_id').agg({ser_name:lambda x: x.tolist()})
return tdf
def main(args=None):
input_dir = settings.interim_metadata_dir
write_fixtures=1
if args:
input_dir = args.metadata_dir
write_fixtures = args.write_fixtures
# if there is one or more metadata csvs process them, else create a csv with just the filenames
flist = utils.get_list_of_files_in_dir(input_dir, file_types=['csv'])
if flist:
# load metadata interim file
df_list = [pd.read_csv(fpath) for fpath in flist]
df_meta = pd.concat(df_list)
print(f"loaded metadata file with {df_meta.shape[0]} rows")
# check if the images for this metadata have been processed
df_feat = pd.read_csv(settings.interim_features_fpath, usecols=[0,], header=None)
fltr = df_meta['id'].isin(df_feat.iloc[:,0])
df_meta = df_meta.loc[fltr,:]
if 'image_fpath' not in df_meta.columns:
df_meta['image_fpath'] = df_meta['id'].apply(utils.make_fpath_from_id,
args=("../data/processed/ethz/images",))
## create new feature columns
# min/max years
if 'date' in df_meta.columns:
df_meta = insert_years_from_text(df_meta)
#### relationship types ####
# extract information
if 'relations' in df_meta.columns:
df_relationships = make_flat_relationships_table(df_meta)
ser = df_relationships['relationship_type']
fltr = ser != "undefined"
ser = ser.loc[fltr]
classes, indices, class_dict = create_class_dict(ser)
df_rel_types = pd.DataFrame(data={"name":classes}, index=indices)
df_rel_types = make_df_of_relationship_types(ser)
# write relationship types fixture
model_name = 'Relationship'
if write_fixtures:
fixture_lst = export_metadata.df_to_fixture_list(df_rel_types,
app_name='ImageSearch',
model_name=model_name,
use_df_index_as_pk=True,
create_datetimefield_name="created_date",
created_by_field_name=None,
)
export_metadata.write_fixture_list_to_json(fixture_lst,
model_name,
settings.fixtures_dir,
file_name_modifier="")
# encode relationship types in main metadata df
df_relationships = df_relationships.loc[df_relationships['relationship_type'] != 'undefined',:]
# df_rel_types_list = nest_relationship_type_ids(df_rel_types, df_relationships)
ser.name = "relationship_type_id"
df_rel_types_list = nest_class_ids(class_dict, ser)
df_meta = df_meta.merge(df_rel_types_list, how='left',left_index=True, right_index=True)
df_meta = df_meta.drop(columns=["relations"])
#### classification types ####
col_name = 'classification'
if col_name in df_meta.columns:
ser = df_meta[col_name]
ser = ser.str.strip().replace("",np.nan).dropna()
classes, indices, class_dict = create_class_dict(ser)
# write classification fixture
tdf = pd.DataFrame(data={"name":classes}, index=indices)
model_name = 'Classification'
if write_fixtures:
fixture_lst = export_metadata.df_to_fixture_list(tdf,
app_name='ImageSearch',
model_name=model_name,
use_df_index_as_pk=True,
create_datetimefield_name="created_date",
created_by_field_name=None,
)
export_metadata.write_fixture_list_to_json(fixture_lst,
model_name,
settings.fixtures_dir,
file_name_modifier="")
# encode classifications in df
df_meta[col_name + '_id'] = df_meta[col_name].map(class_dict)
#### material_technique ####
col_name = 'material_technique'
if col_name in df_meta.columns:
ser = df_meta[col_name].dropna()
ser = split_and_flatten_series(ser, split_char=",")
classes, indices, class_dict = create_class_dict(ser)
# write fixture
tdf = pd.DataFrame(data={"name":classes}, index=indices)
import abc
import pandas as pd
from pathlib import Path
class DtnAbstractReport(object, metaclass=abc.ABCMeta):
""" Alias name for this report. When saved, this alias will be
used as the name of the report.
"""
_alias = None
def __init__(self, env):
# Store the simulation environment
self.env = env
# File to store the data
dir = env.config['globals'].outdir
file = env.config['globals'].outfile
self.file = dir/file
# Other variables
self.writer = None # Set it to export to .xlsx
self.store = None # Set it to export to .h5
# Cache with the collected data
self.cache = None
def __get__(self, item):
# If you are not calling ``collect_data``, just do the
# normal thing
if item != 'collect_data': return getattr(self, item)
# If you are calling ``collect_data``, do it and set
# the cached value automatically
self.cache = self.collect_data()
# Return the cached value
return self.cache
@property
def alias(self):
alias = self.__class__._alias
if not alias: raise ValueError(str(self) + 'does not have an alias')
return alias
@property
def data(self):
# If the cached value is available, use it
if self.cache is not None: return self.cache
# Collect the data and return it
return self.collect_data()
@abc.abstractmethod
def collect_data(self):
""" Collect the data that needs to be exported by this report
:return: pd.DataFrame: Data frame to be exported
"""
pass
def export_to(self, extension):
# Get format
format = extension.replace('.', '')
# Collect data into data frame
df = self.collect_data()
# Trigger the right exporter
exec(f'self.export_to_{format}(df)', locals(), globals())
def __repr__(self):
return self.__str__()
def __str__(self):
return f'<{self.__class__.__name__}>'
def concat_dfs(data, new_col_names):
""" Consolidate a dictionary of data frames into one data frame
:param dict data: {a: df for a, b: df for b}
:param str/tuple new_col_names: The name for the column [a, b, ...] or columns
:return: Data frame with numeric index
"""
# If data is empty, return empty data frame
if not data: return pd.DataFrame()
# If new_col_names is not iterable, convert it
if not isinstance(new_col_names, (list, tuple)):
new_col_names = [new_col_names]
# Get data frames to concatenate
to_concat = {k: df for k, df in data.items() if not df.empty}
# If no data to concatenate, return empty
if len(to_concat) == 0: return pd.DataFrame()
# Concatenate the data frames
df = pd.concat(to_concat)
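# Note: passing a dict to pd.concat puts the dict keys into the outermost level of the
# resulting index, which is how the {a: df_a, b: df_b} mapping survives consolidation
# (the remainder of this function is not shown in this excerpt).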
# coding: utf-8
# # From Multilayer Networks to Deep Graphs
# ## The Noordin Top Terrorist Data
# ### Preprocessing
# In[1]:
# data i/o
import os
import subprocess
import zipfile
# for plots
import matplotlib.pyplot as plt
# the usual
import numpy as np
import pandas as pd
import deepgraph as dg
# notebook display
# get_ipython().magic('matplotlib inline')
# pd.options.display.max_rows = 10
# pd.set_option('expand_frame_repr', False)
# ### Preprocessing the Nodes
# In[2]:
# zip file containing node attributes
os.makedirs("tmp", exist_ok=True)
get_nodes_zip = ("wget -O tmp/terrorist_nodes.zip "
"https://sites.google.com/site/sfeverton18/"
"research/appendix-1/Noordin%20Subset%20%28ORA%29.zip?"
"attredirects=0&d=1")
subprocess.call(get_nodes_zip.split())
# unzip
zf = zipfile.ZipFile('tmp/terrorist_nodes.zip')
zf.extract('Attributes.csv', path='tmp/')
zf.close()
# create node table
v = pd.read_csv('tmp/Attributes.csv')
v.rename(columns={'Unnamed: 0': 'Name'}, inplace=True)
# create a copy of all nodes for each layer (i.e., create "node-layers")
# there are 10 layers and 79 nodes on each layer
v = pd.concat(10*[v])
# add "aspect" as column to v
layer_names = ['Business', 'Communication', 'O Logistics', 'O Meetings',
'O Operations', 'O Training', 'T Classmates', 'T Friendship',
'T Kinship', 'T Soulmates']
layers = [[name]*79 for name in layer_names]
layers = [item for sublist in layers for item in sublist]
v['layer'] = layers
# set unique node index
v.reset_index(inplace=True)
v.rename(columns={'index': 'V_N'}, inplace=True)
# swap columns
cols = list(v)
cols[1], cols[10] = cols[10], cols[1]
v = v[cols]
# get rid of the attribute columns for demonstrational purposes,
# will be inserted again later
v, vinfo = v.iloc[:, :2], v.iloc[:, 2:]
# ### Preprocessing the Edges
# In[3]:
# paj file containing edges for different layers
get_paj = ("wget -O tmp/terrorists.paj "
"https://sites.google.com/site/sfeverton18/"
"research/appendix-1/Noordin%20Subset%20%28Pajek%29.paj?"
"attredirects=0&d=1")
subprocess.call(get_paj.split())
# get data blocks from paj file
with open('tmp/terrorists.paj') as txtfile:
comments = []
data = []
part = []
for line in txtfile:
if line.startswith('*'):
# comment lines
comment = line
comments.append(comment)
if part:
data.append(part)
part = []
else:
# vertices
if comment.startswith('*Vertices') and len(line.split()) > 1:
sublist = line.split('"')
sublist = sublist[:2] + sublist[-1].split()
part.append(sublist)
# edges or partitions
elif not line.isspace():
part.append(line.split())
# append last block
data.append(part)
# extract edge tables from data blocks
ecomments = []
eparts = []
for i, c in enumerate(comments):
if c.startswith('*Network'):
del data[0]
elif c.startswith('*Partition'):
del data[0]
elif c.startswith('*Vector'):
del data[0]
elif c.startswith('*Arcs') or c.startswith('*Edges'):
ecomments.append(c)
eparts.append(data.pop(0))
# layer data parts (indices found manually via comments)
inds = [11, 10, 5, 6, 7, 8, 0, 1, 2, 3]
eparts = [eparts[ind] for ind in inds]
# convert to DataFrames
layer_frames = []
for name, epart in zip(layer_names, eparts):
frame = pd.DataFrame(epart, dtype=np.int16)
# get rid of self-loops, bidirectional edges
frame = frame[frame[0] < frame[1]]
# rename columns
frame.rename(columns={0: 's', 1: 't', 2: name}, inplace=True)
frame['s'] -= 1
frame['t'] -= 1
layer_frames.append(frame)
# set indices
for i, e in enumerate(layer_frames):
e['s'] += i*79
e['t'] += i*79
e.set_index(['s', 't'], inplace=True)
# concat the layers
e = pd.concat(layer_frames)
# edge table as described in the paper
e_paper = e.copy()
# In[4]:
# alternative representation of e
e['type'] = 0
e['weight'] = 0
for layer in layer_names:
where = e[layer].notnull()
e.loc[where, 'type'] = layer
e.loc[where, 'weight'] = e.loc[where, layer]
e = e[['type', 'weight']]
# ## DeepGraph's Supra-Graph Representation of a MLN, $G = (V, E)$
# Above, we have processed the downloaded data into a node table ``v`` and an edge table ``e``, that correspond to the supra-graph representation of a multilayer network. This is the preferred representation of a MLN by a deep graph, since all other representations are entailed in the supra-graph's partition lattice, as we will demonstrate below.
# In[5]:
g = dg.DeepGraph(v, e)
print(g)
# Let's have a look at the node table first
# In[6]:
print(g.v)
# As you can see, there are 790 nodes in total. Each of the 10 layers,
# In[7]:
print(g.v.layer.unique())
# is comprised of 79 nodes. Every node has a feature of type ``V_N``, indicating the individual the node belongs to, and a feature of type ``layer``, corresponding to the layer the node belongs to. Each of the 790 nodes corresponds to a node-layer of the MLN representation of this data.
#
# The edge table,
# In[8]:
print(g.e)
# In[9]:
g.e['type'].unique()
# which - in the case of this data set - correspond to the layers of the nodes. This is due to the fact that there are no inter-layer connections in the Noordin Top Terrorist Network (such as, e.g., an edge from layer ``Business`` to layer ``Communication`` would be). The edges here are all (undirected) intra-layer edges (e.g., Business $\rightarrow$ Business, Operations $\rightarrow$ Operations).
# To see how the edges are distributed among the different types, you can simply type
# In[10]:
g.e['type'].value_counts()
# Let's have a look at how many "actors" (nodes with at least one connection) there are within each layer
# In[11]:
# append degree
gtg = g.return_gt_graph()
g.v['deg'] = gtg.degree_property_map('total').a
# how many "actors" are there per layer?
g.v[g.v.deg != 0].groupby('layer').size()
# In[12]:
# create graph_tool graph for layout
import graph_tool.draw as gtd
gtg = g.return_gt_graph()
gtg.set_directed(False)
# get sfdp layout postitions
pos = gtd.sfdp_layout(gtg, gamma=.5)
pos = pos.get_2d_array([0, 1])
g.v['x'] = pos[0]
g.v['y'] = pos[1]
# configure nodes
kwds_scatter = {'s': 1,
'c': 'k'}
# configure edges
kwds_quiver = {'headwidth': 1,
'alpha': .3,
'cmap': 'prism'}
# color by type
C = g.e.groupby('type').grouper.group_info[0]
# plot
fig, ax = plt.subplots(1, 2, figsize=(15, 7))
g.plot_2d('x', 'y', edges=True, C=C,
kwds_scatter=kwds_scatter,
kwds_quiver=kwds_quiver, ax=ax[0])
# turn axis off, set x/y-lim
ax[0].axis('off')
ax[0].set_xlim((g.v.x.min() - 1, g.v.x.max() + 1))
ax[0].set_ylim((g.v.y.min() - 1, g.v.y.max() + 1))
# plot adjacency matrix
adj = g.return_cs_graph().todense()
adj = adj + adj.T
inds = np.where(adj != 0)
ax[1].scatter(inds[0], inds[1], c='k', marker='.')
ax[1].grid()
ax[1].set_xlim(-1, 791)
ax[1].set_ylim(-1,791)
# ## Redistributing Information on the Partition Lattice of the MLN
# Based on the types of features ``V_N`` and ``layer``, we can now redistribute the information contained in the supra-graph ``g``. This redistribution allows for several representations of the graph, which we will demonstrate in the following.
# ### The SuperGraph $G^L = (V^L, E^L)$
# Partitioning by the type of feature ``layer`` leads to the supergraph $G^L = (V^L,E^L)$, where every supernode $V^{L}_{i^L} \in V^{L}$ corresponds to a distinct layer, encompassing all its respective nodes. Superedges $E^{L}_{i^L, j^L} \in E^{L}$ with either $i^L = j^L$ or $i^L \neq j^L$ correspond to collections of intra- and
# inter-layer edges of the MLN, respectively.
# In[13]:
# partition the graph
lv, le = g.partition_graph('layer',
relation_funcs={'weight': ['sum', 'mean', 'std']})
lg = dg.DeepGraph(lv, le)
print(lg)
# In[14]:
print(lg.v)
# In[15]:
print(lg.e)
# Let's plot the graph ``g`` grouped by its layers.
# In[16]:
# append layer_id to group nodes by layers
g.v['layer_id'] = g.v.groupby('layer').grouper.group_info[0].astype(np.int32)
# create graph_tool graph object
gtg = g.return_gt_graph(features=['layer_id'])
gtg.set_directed(False)
# get sfdp layout postitions
pos = gtd.sfdp_layout(gtg, groups=gtg.vp['layer_id'], mu=.15)
pos = pos.get_2d_array([0, 1])
g.v['x'] = pos[0]
g.v['y'] = pos[1]
# configure nodes
kwds_scatter = {'s': 10,
'c': 'k'}
# configure edges
kwds_quiver = {'headwidth': 1,
'alpha': .4,
'cmap': 'viridis'}
# color by weight
C = g.e.weight.values
# plot
fig, ax = plt.subplots(figsize=(12, 12))
obj = g.plot_2d('x', 'y', edges=True, C=C,
kwds_scatter=kwds_scatter,
kwds_quiver=kwds_quiver, ax=ax)
# turn axis off, set x/y-lim and name layers
ax.axis('off')
margin = 10
ax.set_xlim((g.v.x.min() - margin, g.v.x.max() + margin))
ax.set_ylim((g.v.y.min() - margin, g.v.y.max() + margin))
for layer in layer_names:
plt.text(g.v[g.v['layer'] == layer].x.mean() - margin * 3,
g.v[g.v['layer'] == layer].y.max() + margin,
layer, fontsize=15)
# We can also plot the supergraph $G^L = (V^L, E^L)$
# In[17]:
# create graph_tool graph of lg
gtg = lg.return_gt_graph(relations=True, node_indices=True, edge_indices=True)
# create plot
gtd.graph_draw(gtg,
vertex_text=gtg.vp['i'], vertex_text_position=-2,
vertex_fill_color='w',
vertex_text_color='k',
edge_text=gtg.ep['n_edges'],
inline=True, fit_view=.8,
output_size=(400,400))
# ### The SuperGraph $G^N = (V^N, E^N)$
# Partitioning by the type of feature ``V_N`` leads to the supergraph $G^{N} = (V^{N}, E^{N})$, where each supernode $V^{N}_{i^N} \in V^{N}$ corresponds to a node of the MLN. Superedges $E^{N}_{i^N j^N} \in E^{N}$ with $i^N = j^N$ correspond to the coupling edges of a MLN.
# In[18]:
# partition by MLN's node indices
nv, ne, gv, ge = g.partition_graph('V_N', return_gve=True)
# for each superedge, get types of edges and their weights
def type_weights(group):
index = group['type'].values
data = group['weight'].values
return pd.Series(data=data, index=index)
ne_weights = ge.apply(type_weights).unstack()
ne = pd.concat((ne, ne_weights), axis=1)
# create graph
ng = dg.DeepGraph(nv, ne)
ng
# In[19]:
print(ng.v)
# In[20]:
print(ng.e)
# Let's plot the graph ``g`` grouped by ``V_N``.
# In[21]:
# create graph_tool graph object
g.v['V_N'] = g.v['V_N'].astype(np.int32) # sfdp only takes int32
g_tmp = dg.DeepGraph(v)
gtg = g_tmp.return_gt_graph(features='V_N')
gtg.set_directed(False)
# get sfdp layout postitions
pos = gtd.sfdp_layout(gtg, groups=gtg.vp['V_N'], mu=.3, gamma=.01)
pos = pos.get_2d_array([0, 1])
g.v['x'] = pos[0]
g.v['y'] = pos[1]
# configure nodes
kwds_scatter = {'c': 'k'}
# configure edges
kwds_quiver = {'headwidth': 1,
'alpha': .2,
'cmap': 'viridis_r'}
# color by type
C = g.e.groupby('type').grouper.group_info[0]
# plot
fig, ax = plt.subplots(figsize=(15,15))
g.plot_2d('x', 'y', edges=True,
kwds_scatter=kwds_scatter, C=C,
kwds_quiver=kwds_quiver, ax=ax)
# turn axis off, set x/y-lim and name nodes
name_dic = {i: name for i, name in enumerate(vinfo.iloc[:79].Name)}
ax.axis('off')
ax.set_xlim((g.v.x.min() - 1, g.v.x.max() + 1))
ax.set_ylim((g.v.y.min() - 1, g.v.y.max() + 1))
for node in g.v['V_N'].unique():
plt.text(g.v[g.v['V_N'] == node].x.mean() - 1,
g.v[g.v['V_N'] == node].y.max() + 1,
name_dic[node], fontsize=12)
# Let's also plot the supergraph $G^N = (V^N, E^N)$, where the color of the superedges corresponds to the number of edges within the respective superedge.
# In[22]:
# get rid of isolated node for nicer layout
ng.v.drop(57, inplace=True, errors='ignore')
# create graph_tool graph object
gtg = ng.return_gt_graph(features=True, relations='n_edges')
gtg.set_directed(False)
# get sfdp layout postitions
pos = gtd.sfdp_layout(gtg)
pos = pos.get_2d_array([0, 1])
ng.v['x'] = pos[0]
ng.v['y'] = pos[1]
# configure nodes
kwds_scatter = {'s': 100,
'c': 'k'}
# configure edges
# split edges with only one type of connection
C_split_0 = ng.e['n_edges'].values.copy()
C_split_0[C_split_0 == 1] = 0
# edges with one type of connection
kwds_quiver_0 = {'alpha': .3,
'width': .001}
# edges with more than one type
kwds_quiver = {'headwidth': 1,
'width': .003,
'alpha': .7,
'cmap': 'Blues',
'clim': (1, ng.e.n_edges.max())}
# create plot
fig, ax = plt.subplots(figsize=(15,15))
ng.plot_2d('x', 'y', edges=True, C_split_0=C_split_0,
kwds_scatter=kwds_scatter, kwds_quiver_0=kwds_quiver_0,
kwds_quiver=kwds_quiver, ax=ax)
# turn axis off, set x/y-lim and name nodes
ax.axis('off')
ax.set_xlim(ng.v.x.min() - 1, ng.v.x.max() + 1)
ax.set_ylim(ng.v.y.min() - 1, ng.v.y.max() + 1)
for i in ng.v.index:
plt.text(ng.v.at[i, 'x'], ng.v.at[i, 'y'] + .3, i, fontsize=12)
# ### The Tensor-Like Representation $G^{NL} = (V^{NL}, E^{NL})$
# In[23]:
# partition the graph
relation_funcs = {'type': 'sum', 'weight': 'sum'} # just to transfer relations
nlv, nle = g.partition_graph(['V_N', 'layer'], relation_funcs=relation_funcs)
nlg = dg.DeepGraph(nlv, nle)
nlg
# In[24]:
print(nlg.v)
# In[25]:
print(nlg.e)
# In[26]:
print(nlg.e.loc[2, 'Communication', :, 'Communication'])
# ## The "Hidden Layers" of a MLN
# In[27]:
print(vinfo)
# As you can see, there are 9 different attributes associated with each individual, such as their military training, nationality, education level, etc. Let's append this information to the node table, and plot the nodes grouped by their education level.
# In[28]:
# append node information to g
v = pd.concat((v, vinfo), axis=1)
## Coin Flip Simulation ##
# Objective: Verify the Central Limit Theorem
# 1) Importing
import random
import pandas as pd
import matplotlib.pyplot as plt
# 2) Functions
def flip_coin(n, t):
"""
Simulate repeated coin flips and count heads.
Ex: n=2 and t=4 --> possible outcome: [H, H, T, T], [[H, T], [T, T], [H, T], [H, H]]
n=3 and t=2 --> possible outcome: [H, H], [[H, T], [T, T]], [[H, T, T], [T, T, H]]
:param n: maximum number of coins flipped per turn (the experiment is run for 1, 2, ..., n coins)
:param t: number of turns for each coin count
:return: list of n lists; the i-th inner list holds the heads count of each of the t turns in which i coins were flipped
"""
one_flip = [] # one_flip has i elements, i = [1, 2,..., n]
one_turn = [] # one_turn has t elements
all_turns = [] # all_turns has n elements
one_turn_heads = []
all_turns_heads = []
for i in range(1 , n + 1):
for j in range(1 , t + 1):
for k in range(1, i + 1):
one_flip.append(random.randint(0 , 1)) # Heads = 1, Tales = 0
one_turn.append(one_flip[:])
one_turn_heads.append(one_flip.count(1)) # Counting heads in one_flip
one_flip.clear() # Erase all data in one-flip
all_turns.append(one_turn[:])
all_turns_heads.append(one_turn_heads[:])
one_turn.clear() # Erase all data in one_turn
one_turn_heads.clear() # Erase all data in one-turn_heads
return all_turns_heads
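# e.g. flip_coin(3, 2) might return [[1, 0], [2, 1], [1, 3]] (illustrative random values):
# the first inner list comes from flipping 1 coin per turn, the second from 2, the third from 3.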
# 3) Main code
print('100 coins will be flipped !')
turn = int(input('How many times the coin will be flipped: '))
heads = flip_coin(100, turn)
# 3.1) Creation of dataframes
df1 = pd.DataFrame({'1 coin': heads[0]})
df2 = pd.DataFrame({'2 coins': heads[1]})
df3 = pd.DataFrame({'3 coins': heads[2]})
df4 = pd.DataFrame({'4 coins': heads[3]})
df5 = pd.DataFrame({'5 coins': heads[4]})
df6 = pd.DataFrame({'6 coins': heads[5]})
df18 = pd.DataFrame({'18 coins': heads[17]})
df19 = pd.DataFrame({'19 coins': heads[18]})
df20 = pd.DataFrame({'20 coins': heads[19]})
#/usr/bin/env python
import os
import argparse
import numpy as np
import pandas as pd
from slugify import slugify
def do_slugify(txt):
try:
return slugify(txt)
except TypeError:
return slugify(txt.decode())
def add_headers(args, data, headers):
data = pd.DataFrame(
np.zeros((6, data.shape[1]), dtype=int),
columns=data.columns
).append(data, ignore_index=True)
data[:2] = headers
for ii in range(data.shape[1]):
data.iloc[0,ii] = do_slugify(data.iloc[0,ii])
data.iloc[1,ii] = do_slugify(data.iloc[1,ii])
return data
def merge_sheets(s1, s2):
s1['sheet'] = pd.Series(np.zeros(s1.shape[0], dtype=int), index=s1.index)
s2['sheet'] = pd.Series(np.ones(s2.shape[0], dtype=int), index=s2.index)
s1['index'] = pd.Series(np.arange(0, s1.shape[0], dtype=int), index=s1.index)
s2['index'] = pd.Series(np.arange(0, s2.shape[0], dtype=int), index=s2.index)
res = pd.concat([s1, s2])
res.sort_values(['index', 'sheet'], inplace=True)
del res['sheet']
del res['index']
return res
def read_and_merge(args):
if args.test:
sheet1 = pd.DataFrame([
np.array(['a 0', 'a 1', 'a 2']),
np.array(['one', 'two', 'three']),
np.ones(3, dtype=int)*0,
np.ones(3, dtype=int)*1,
np.ones(3, dtype=int)*2, '-'*3
])
elif not args.csv:
sheet1 = pd.read_excel(args.xls[0], sheetname=args.sheet, header=None)
sheet1 = sheet1.astype(str)
else:
sheet1 = pd.read_csv(args.xls[0], header=None, dtype=str)
from __future__ import division
import pandas as pd
import logging
from datetime import datetime
from numpy.random import RandomState
import numpy as np
from trumania.core.util_functions import setup_logging, load_all_logs, build_ids, make_random_bipartite_data
from trumania.core.clock import CyclicTimerProfile, CyclicTimerGenerator
from trumania.core.random_generators import SequencialGenerator, NumpyRandomGenerator, ConstantGenerator
from trumania.core.random_generators import MSISDNGenerator, ParetoGenerator, DependentTriggerGenerator
from trumania.core.circus import Circus
from trumania.core.operations import Chain
from trumania.components.geographies.uganda import WithUganda
from trumania.components.geographies.random_geo import WithRandomGeo
from trumania.components.social_networks.erdos_renyi import WithErdosRenyi
from trumania.components.time_patterns.profilers import HighWeekDaysTimerGenerator
from trumania.core import operations
# couple of utility methods called in Apply of this scenario
def compute_call_value(story_data):
"""
Computes the value of a call based on duration, onnet/offnet and time
of the day.
This is meant to be called in an Apply of the CDR use case
"""
price_per_second = 2
# no, I'm lying, we just look at duration, but that's the idea...
df = story_data[["DURATION"]] * price_per_second
# must return a dataframe with a single column named "result"
return df.rename(columns={"DURATION": "result"})
def compute_sms_value(story_data):
"""
Computes the value of an SMS based on duration, onnet/offnet and time
of the day.
"""
return pd.DataFrame({"result": 10}, index=story_data.index)
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a two asset portfolio tester with a brute force optimizer
#Takes all pair combos, tests, sorts, returns optimal params from all pairs + top performing pair params
#Import modules
import numpy as np
import random as rand
import pandas as pd
import time as t
from YahooGrabber import YahooGrabber
from ListPairs import ListPairs
from pandas.parser import CParserError
#Empty data structure
Empty = []
Counter = 0
Counter2 = 0
Dataset2 = pd.DataFrame()
import numpy as np
import pandas as pd
import nltk
import matplotlib.pyplot as plt
import string
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import mean_squared_error
from scipy.spatial.distance import correlation, cosine
import sklearn.metrics as metrics
import json
import io
import time
import re
import sys
import math
from scipy import sparse
from scipy import stats
# generate R matrix
def textProcessing(text):
# lower words
text = text.lower()
# remove punctuation
for c in string.punctuation:
text = text.replace(c, ' ')
# tokenize
wordLst = nltk.word_tokenize(text)
# stop word
filtered = [w for w in wordLst if w not in stopwords.words('english')]
# keep noun
refiltered =nltk.pos_tag(filtered)
filtered = [w for w, pos in refiltered if pos.startswith('NN')]
# xtract the stem
ps = PorterStemmer()
filtered = [ps.stem(w) for w in filtered]
return " ".join(filtered)
def rating_proportion(text,rate):
return text * int(rate)
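# i.e. the processed review text is simply repeated `rate` times, presumably so that
# higher-rated reviews contribute proportionally more word counts downstream.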
def lda(review,n_topic = 10,n_top_words=20):
# vectorization
# generate the word-docu matrix
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,stop_words='english')
tf = tf_vectorizer.fit_transform(review)
# train the lda model
lda = LatentDirichletAllocation(n_topics=n_topic,
max_iter=50,
learning_method='batch')
lda.fit(tf)
# print the performance
print('perplexity is: ',lda.perplexity(tf))
# generate the top word list for every topic
tf_feature_names = tf_vectorizer.get_feature_names()
feature_dict = {k: v for v, k in enumerate(tf_feature_names)}
# for topic_idx, topic in enumerate(lda.components_):
# print ("Topic #%d:" % topic_idx)
# print (" ".join([tf_feature_names[i]
# for i in topic.argsort()[:-n_top_words - 1:-1]]))
# return the topic*word distribution matrix
return lda.components_,feature_dict,lda.perplexity(tf)
def topic_probability(document,feature_dict,topic_word_matrix):
word_list = document.split()
topic_num = len(topic_word_matrix)
topic_probability = {k:0 for k in range(topic_num)}
for topic_idx in range(topic_num):
for word in word_list:
if word in feature_dict.keys():
topic_probability[topic_idx] += topic_word_matrix[topic_idx,feature_dict[word]]
return topic_probability
def normalize(x):
normalized_list = []
new_dict = {}
for key,value in x.items():
normalized_list.append(value**2)
for key, value in x.items():
if sum(normalized_list) == 0:
new_dict[key] = 0
else:
new_dict[key] = value / math.sqrt(sum(normalized_list))
return new_dict
# prediction
# KL
def user_user_rating_prediction(user_id=1, biz_id=1, top_k=15):
user_i_mean_rating = user_mean_rating_dic[user_id]
if get_top_k_similar_user(user_id, k=top_k, biz_id =biz_id ) == 'not enough similar users':
print('not enough data! return mean rating of this user!')
return user_i_mean_rating
else:
top_similar_user_list = get_top_k_similar_user(user_id, k=top_k, biz_id=biz_id)
# list to store similar user info for calculation: mean rating, rating given biz_id, similarity
u_info = []
for similar_user in top_similar_user_list:
mean_rating = user_mean_rating_dic[similar_user]
rating_u_i = rating_matrix[similar_user, biz_id]
similarity = user_similarity[user_id][similar_user]
u_info.append([mean_rating, rating_u_i, similarity])
similar_user_rating = np.sum([(u_info[i][1] - u_info[i][0]) * u_info[i][2] for i in range(top_k)])
sum_of_similarity = np.sum([u_info[i][2] for i in range(top_k)])
predicted_rating = user_i_mean_rating + similar_user_rating / sum_of_similarity
return predicted_rating
def biz_biz_rating_predation(user_id=1, biz_id=1, top_k=5):
user_i_mean_rating = user_mean_rating_dic[user_id]
if get_top_k_similar_items(user_id = user_id, k = top_k, biz_id = biz_id) == 'not enough similar items':
print('not enough data! return mean rating of this user!')
return user_i_mean_rating
else:
top_similar_item_list = get_top_k_similar_items(user_id = user_id, k = top_k, biz_id = biz_id)
'''
# Need to be revised, must have bug
# list to store similar item info for calculation: mean rating, rating given biz_id, similarity
item_info = []
for similar_item in top_similar_item_list:
mean_rating = user_mean_rating_dic[similar_item]
rating_u_i = rating_matrix[similar_item, biz_id]
similarity = user_similarity[user_id][similar_item]
item_info.append([mean_rating, rating_u_i, similarity])
similar_user_rating = np.sum([(item_info[i][1] - item_info[i][0]) * item_info[i][2] for i in range(top_k)])
sum_of_similarity = np.sum([item_info[i][2] for i in range(top_k)])
predicted_rating = user_i_mean_rating + similar_user_rating / sum_of_similarity
'''
return None
def get_top_k_similar_user(user_id = None,k= 15, biz_id = None):
# get the ids of similar users who have rated the item
user_rated_item_id = [id_ for id_ in range(user_num) if rating_matrix[id_,biz_id]!=0]
#find the most similar user
if len(user_rated_item_id)< k:
return 'not enough similar users'
else:
index_list = np.argsort(user_similarity[user_id])[-k-1:-1]
return index_list
def KL_sim(a,b):
KL_ab = stats.entropy(a,b)
KL_ba = stats.entropy(b,a)
return np.exp(-(KL_ab+KL_ba)/2)
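# KL_sim maps the symmetrised KL divergence onto (0, 1]: identical distributions give
# exp(0) = 1.0 and increasingly different topic distributions decay towards 0, so the
# value can be used directly as a similarity weight.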
# Cosine
def findksimilarusers(user_id, ratings, metric = 'cosine', k=1):
similarities=[]
indices=[]
model_knn = NearestNeighbors(metric = metric, algorithm = 'brute')
model_knn.fit(ratings)
distances, indices = model_knn.kneighbors(ratings.iloc[user_id-1, :].values.reshape(1, -1), n_neighbors = k+1)
similarities = 1-distances.flatten()
print('{0} most similar users for User {1}:\n'.format(k,user_id))
for i in range(0, len(indices.flatten())):
if indices.flatten()[i]+1 == user_id:
continue
else:
print('{0}: User {1}, with similarity of {2}'.format(i, indices.flatten()[i]+1, similarities.flatten()[i]))
return similarities,indices
def cos_predict_userbased(user_id, item_id, ratings, metric = 'cosine', k=5):
prediction=0
similarities, indices=findksimilarusers(user_id, ratings,metric, k) #similar users based on cosine similarity
mean_rating = ratings.loc[user_id,:].mean() #to adjust for zero based indexing
sum_wt = np.sum(similarities)-1
product=1
wtd_sum = 0
for i in range(0, len(indices.flatten())):
if indices.flatten()[i]+1 == user_id:
continue
else:
ratings_diff = ratings.iloc[indices.flatten()[i],item_id]-np.mean(ratings.iloc[indices.flatten()[i],:])
product = ratings_diff * (similarities[i])
wtd_sum = wtd_sum + product
prediction = int(round(mean_rating + (wtd_sum/sum_wt)))
print('\nPredicted rating for user {0} -> item {1}: {2}'.format(user_id,item_id,prediction))
return prediction
def cos_predict_itembased(user_id, item_id, ratings, metric = 'cosine', k=5):
prediction= wtd_sum =0
similarities, indices=findksimilarusers(user_id, ratings,metric, k) #similar users based on correlation coefficients
mean_rating = ratings.loc[:,item_id].mean()
sum_wt = np.sum(similarities)-1
product = 1
wtd_sum = 0
for i in range(0, len(indices.flatten())):
if indices.flatten()[i]+1 == item_id:
continue
else:
product = ratings.iloc[user_id,indices.flatten()[i]] * (similarities[i])
wtd_sum = wtd_sum + product
prediction = int(round(wtd_sum/sum_wt))
print ('\nPredicted rating for user {0} -> item {1}: {2}'.format(user_id,item_id,prediction)
)
return prediction
##### run
# import data
time1 = time.time()
review1 = pd.read_csv('review1.csv')
review2 = pd.read_csv('review2.csv')
review3 = pd.read_csv('review3.csv')
# Copyright (C) 2021 ServiceNow, Inc.
""" Functionality for training all keyword prediction downstream models
and building the downstream task dataset
"""
import pandas as pd
from typing import Union, List, Callable
import tqdm
import datetime
import random
import pathlib
import numpy as np
import subprocess
import sys
import joblib
import os
import re
import json
import wandb
import sklearn
import nrcan_p2.data_processing.pipeline_utilities as pu
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GroupShuffleSplit, GridSearchCV
from sklearn.model_selection import train_test_split
from filelock import FileLock
from imblearn.over_sampling import RandomOverSampler
from nrcan_p2.data_processing.vectorization import convert_dfcol_text_to_vector
from sklearn.metrics import (
accuracy_score,
precision_recall_fscore_support,
multilabel_confusion_matrix,
confusion_matrix
)
from keras import backend as K
from tensorflow import keras
import tensorflow as tf
from tensorflow.keras import layers
import pandas as pd
import pathlib
from gensim.test.utils import datapath, get_tmpfile
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import Callback
from gensim.test.utils import datapath, get_tmpfile
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
#from sklearn.multioutput import MultiOutputClassifier
from nrcan_p2.evaluation.sklearn_multioutput import MultiOutputClassifier
from nrcan_p2.evaluation.bert_keyword_prediction import main as run_bert_multiclass_script
class SupressSettingWithCopyWarning:
""" To be used in with blocks to suppress SettingWithCopyWarning """
def __enter__(self):
pd.options.mode.chained_assignment = None
def __exit__(self, *args):
pd.options.mode.chained_assignment = 'warn'
def produce_text_column(
df:pd.DataFrame,
text_column:str,
text_column_is_list:bool,
text_col_processing:str,
pre_pipeline:str,
post_pipeline:str,
):
""" Given a raw metadata df, produce the "text" column,
adding "keyword_text" column to the df.
This assumes that the input text column is either a str
or a list of str that should be somehow converted to a single str
:returns: input df, with an extra column 'keyword_text' with the
processed text from column 'text_column
"""
output_col = 'keyword_text'
# get the text
df[output_col] = df[text_column]
# the text column might be a list,
# convert to a string as necesssary, using the
# text_col_processing method
if text_column_is_list:
if text_col_processing == 'join':
df[output_col] = df[output_col].str.join(' ')
elif text_col_processing == 'first':
df[output_col] = df[output_col].str[0]
else:
raise ValueError('Unknown text_col_processing')
if pre_pipeline is not None:
dff = pu.run_pipeline(df[[output_col]],
col=output_col,
next_col=output_col,
preprocessing_pipe=pre_pipeline)
else:
dff = df
if post_pipeline is not None:
dff[output_col] = dff.apply(
lambda row: pu.run_pipeline(
row.to_frame().transpose(),
col=output_col,
next_col=output_col,
postmerge_preprocessing_pipe=post_pipeline),
axis=1
)
df[output_col] = dff[output_col]
return df
def produce_cat_column(
df,
keyword_col,
pre_pipeline,
post_pipeline
):
""" Given a raw metadata df, produce the "category" column,
adding "keyword_cat" column to the df.
This assumes that the input category column is a list of
strings.
:returns: input df, with an extra column 'keyword_cat' with the
processed text from column indicated by 'keyword_col'
"""
output_col = 'keyword_cat'
df = df.copy()
df[output_col] = df[keyword_col]
if pre_pipeline is None and post_pipeline is None:
return df
# assume it's a list of keywords
df_kw = df.explode(column=output_col)
if pre_pipeline is not None:
df_kw = pu.run_pipeline(df_kw[[output_col]],
col=output_col,
next_col=output_col,
preprocessing_pipe=pre_pipeline)
if post_pipeline is not None:
df_kw[output_col] = df_kw.apply(
lambda row: pu.run_pipeline(
row.to_frame().transpose(),
col=output_col,
next_col=output_col,
postmerge_preprocessing_pipe=post_pipeline),
axis=1
)
df_kw = df_kw.reset_index().groupby(['index']).agg(lambda x: list(x))
# the previous step inserts nan values into what should be empty lists. remove them
df_kw[output_col] = df_kw[output_col].apply(lambda x: [xx for xx in x if xx is not None and isinstance(xx, str)])
df[output_col] = df_kw[output_col]
return df
def produce_keyword_classification_dataset_from_df(
df_parquet:Union[str,pd.DataFrame],
pre_pipeline:str,
post_pipeline:str,
cat_pre_pipeline:str,
cat_post_pipeline:str,
text_column:str,
text_column_is_list:bool,
text_col_processing:str,
keyword_col:str,
n_categories:int,
task:str,
n_negative_sample:int,
do_not_drop:bool=False,
):
""" Produce a keyword classification dataset
:param df_parquet: the raw metadata file for produce a keyword dataset
as either a df or the name of a parquet file to load
:param pre_pipeline: the name of an NRCan "pre" pipeline to be used
to process the text column. A pre pipeline is one that operates
at the textbox level.
:param post_pipeline: the name of an NRCan "post" pipeline to be used
to process the text column after pre_pipeline. A post pipeline
is one that operates on the textboxes once merged, but will be
applied here per example in the input df
:param cat_pre_pipeline: the name of an NRCan "pre" pipeline to be used
to process the category column
:param cat_post_pipeline: the name of an NRCan "post" pipeline to be used
to process the category column
:param text_column: the name of the text column in the input
:param text_column_is_list: whether or not the text column is a str
or a list of str
:param keyword_col: the name of the keyword column in the input
:param n_categories: the top-n categories to maintain
:param task: the type of dataset to produce, MULTICLASS or PAIRING
:param n_negative_samples: the number of negative samples for the PAIRING
task, None to get all negative samples
:param do_not_drop: whether to not drop rows with null values
:returns: df with the columns
MULTICLASS: 'keyword_text', 'cat_X' ... for each X in the output categories
keyword_text is the text input
cat_X is 0/1 indicating the presence of a category
PAIRING: 'keyword_text', 'cat', 'label'
keyword_text is the text input
cat is the category name
label is 0/1 to indicate whether the cat matches the keyword_text
"""
if type(df_parquet) == str:
df = pd.read_parquet(df_parquet)
else:
df = df_parquet
with SupressSettingWithCopyWarning():
df = produce_text_column(
df,
text_column=text_column,
text_column_is_list=text_column_is_list,
text_col_processing=text_col_processing,
pre_pipeline=pre_pipeline,
post_pipeline=post_pipeline,
)
# get the subject
# drop None values in the keywords
with SupressSettingWithCopyWarning():
if task == 'MULTICLASS':
df['keyword_cat'] = df[keyword_col].apply(lambda x: [xx.strip() for xx in x if xx is not None] if x is not None else [])
else:
df['keyword_cat'] = df[keyword_col].apply(lambda x: [xx.strip() for xx in x if xx is not None] if x is not None else x)
df = produce_cat_column(
df,
keyword_col='keyword_cat',
pre_pipeline=cat_pre_pipeline,
post_pipeline=cat_post_pipeline,
)
vc = df['keyword_cat'].explode().value_counts()
if n_categories is None:
vc_subset = vc.index
else:
vc_subset = vc.index[0:n_categories]
if task == "MULTICLASS":
assert df.index.is_unique
mlb = MultiLabelBinarizer()
# multiclass classifier, produce one column per label
if not do_not_drop:
print(df.shape)
df = df.dropna(subset=['keyword_cat'])
print(df.shape)
t = mlb.fit_transform(df.keyword_cat)
Y = pd.DataFrame(t,columns=['cat_' + c for c in mlb.classes_])
Y = Y[['cat_' + c for c in vc_subset]]
df_ret = pd.merge(df, Y, right_index=True, left_index=True)
elif task == "PAIRING":
if not do_not_drop:
print('Dropping...')
print(df.shape)
df = df.dropna(subset=['keyword_cat'])
print(df.shape)
full_vc_set = set(vc_subset)
if n_negative_sample is not None:
def get_sampled_categories(x):
# Sample the desired number of negative examples
rest = full_vc_set.difference(x)
# if there are more elements than the sample we want, take a sample
if len(rest) > n_negative_sample:
rest = random.sample(full_vc_set.difference(x),n_negative_sample)
# otherwise, just use the full set
# probably unnecessary to check for nulls, but just in case...
if len(rest) == 0:
return None
else:
return rest
df['cat_negative_sample'] = df.keyword_cat.apply(get_sampled_categories)
else:
def get_remaining_categories(x):
# Produce all negative examples
rest = list(full_vc_set.difference(x))
if len(rest) == 0:
return None
else:
return rest
df['cat_negative_sample'] = df.keyword_cat.apply(get_remaining_categories)
print('Dropping negative samples...')
print(df.shape)
df = df.dropna(subset=['cat_negative_sample'])
print(df.shape)
df_pos = df.explode(column='keyword_cat')
df_pos['label'] = 1
df_pos['cat'] = df_pos['keyword_cat']
df_neg = df.explode(column='cat_negative_sample')
df_neg['label'] = 0
df_neg['cat'] = df_neg['cat_negative_sample']
df_ret = pd.concat([df_pos, df_neg])
df_ret = df_ret.drop(columns=['cat_negative_sample', 'keyword_cat']) #'cat_negative',
elif task == "PREDICT":
raise NotImplementedError()
return df_ret
def load_glove_model(model_path):
print(f'Loading model from {model_path}...')
glove_file = datapath(model_path)
tmp_file_name = f"{pathlib.Path(model_path).parent}/tmp_word2vec.txt"
with FileLock(str(tmp_file_name) + ".lock"):
tmp_file = get_tmpfile(tmp_file_name)
_ = glove2word2vec(glove_file, tmp_file)
model = KeyedVectors.load_word2vec_format(tmp_file)
return model
class KerasValidMetrics(Callback):
def __init__(self, val_data, batch_size = 32):
super().__init__()
self.validation_data = val_data
#self.batch_size = batch_size
def on_train_begin(self, logs={}):
self.val_micro_precision = []
self.val_micro_recall = []
self.val_micro_fb1 = []
self.val_macro_precision = []
self.val_macro_recall = []
self.val_macro_fb1 = []
self.val_sample_precision = []
self.val_sample_recall = []
self.val_sample_fb1 = []
self.val_sample_support = []
self.val_accuracy = []
def on_epoch_end(self, epoch, logs={}):
val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
val_targ = self.validation_data[1]
metrics = compute_metrics_multiclass(val_targ, val_predict)
self.val_micro_precision.append(metrics['micro-precision'])
self.val_macro_precision.append(metrics['macro-precision'])
self.val_micro_recall.append(metrics['micro-recall'])
self.val_macro_recall.append(metrics['macro-recall'])
self.val_micro_fb1.append(metrics['micro-fb1'])
self.val_macro_fb1.append(metrics['macro-fb1'])
self.val_accuracy.append(metrics['accuracy'])
self.val_sample_precision.append(metrics['sample-precision'])
self.val_sample_recall.append(metrics['sample-recall'])
self.val_sample_fb1.append(metrics['sample-fb1'])
self.val_sample_support.append(metrics['support'])
print(f" - val_micro-precision: {metrics['micro-precision']} - val_micro-recall: {metrics['micro-recall']} - val_micro_fb1: {metrics['micro-fb1']}")
print(f" - val_macro-precision: {metrics['macro-precision']} - val_macro-recall: {metrics['macro-recall']} - val_macro_fb1: {metrics['macro-fb1']}")
print(f" - val_accuracy: {metrics['accuracy']}")
print(f" - val_sample_precision: {metrics['sample-precision']}")
print(f" - val_sample_recall: {metrics['sample-recall']}")
print(f" - val_sample_fb1: {metrics['sample-fb1']}")
print(f" - val_sample_support: {metrics['support']}")
return
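# Usage sketch for KerasValidMetrics (model, X_val and Y_val are placeholders): the callback
# expects the validation data as an (inputs, targets) tuple and prints/records the
# multi-label metrics after every epoch.
#   valid_metrics = KerasValidMetrics(val_data=(X_val, Y_val))
#   model.fit(X_train, Y_train, epochs=10, batch_size=32, callbacks=[valid_metrics])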
def run_keyword_prediction_keras(
data_dir,
output_dir,
n_splits,
n_rerun=5,
keyword_text_col='sentence1',
label_col='label',
keyword_cat_col='cat',
task='MULTICLASS',
use_class_weight=False,
embedding_model_path=None,
njobs=None,
existing_run_dir=None,
):
""" Train a model with keras """
saved_args = locals()
maxlen = 200
if existing_run_dir is not None:
print('Starting from an existing run...')
output_dir = pathlib.Path(existing_run_dir)
assert output_dir.exists()
else:
now = datetime.datetime.today().strftime('%Y-%m-%d-%H-%M-%S%f')
output_dir_parent = pathlib.Path(output_dir)
output_dir = pathlib.Path(output_dir) / f'run-glove-keras-{task}_{now}'
output_dir.mkdir(parents=False, exist_ok=False)
input_df_log = output_dir / "input_data.log"
if not input_df_log.exists():
with open(input_df_log, 'w') as f:
json.dump({k: v.__name__ if callable(v) else v
for k,v in saved_args.items()}, f, indent=4)
embedding_model = load_glove_model(embedding_model_path)
else:
with open(input_df_log) as f:
loaded_args = json.load(f)
data_dir = loaded_args['data_dir']
n_splits = loaded_args['n_splits']
n_rerun = loaded_args['n_rerun']
keyword_text_col = loaded_args['keyword_text_col']
label_col = loaded_args['label_col']
keyword_cat_col = loaded_args['keyword_cat_col']
task = loaded_args['task']
use_class_weight = loaded_args['use_class_weight']
assert type(use_class_weight) == bool
embedding_model_path = loaded_args['embedding_model_path']
njobs = loaded_args['njobs']
print('replacing...')
print(saved_args)
print('with..')
print(loaded_args)
embedding_model = load_glove_model(embedding_model_path)
data_dir = pathlib.Path(data_dir)
models_all = {}
cv_scores_all = {}
for i in range(0,n_splits):
suffix = '.csv'
print('--------------------------------------------------')
print(f"Training split {i}...")
train_file = data_dir / f"split_{i}" / ("train" + suffix)
print(f"Train file: {train_file}")
train_df = pd.read_csv(train_file)
valid_file = data_dir / f"split_{i}" / ("valid" + suffix)
print(f"Valid file: {valid_file}")
valid_df = pd.read_csv(valid_file)
valid_df = valid_df.fillna("")
# fit the tokenizer on both train and validation text: the embedding may already
# contain vectors for tokens that only appear in the validation set, and we want
# those tokens in the index even though they never occur in the training data
print('Building tokenizer...')
tokenizer = Tokenizer(
num_words=None,
filters="",
lower=True,
split=" ",
char_level=False,
oov_token=None,
document_count=0,
)
tokenizer.fit_on_texts(pd.concat([train_df,valid_df])[keyword_text_col].values)
train_sequences_sent1 = tokenizer.texts_to_sequences(train_df[keyword_text_col].values)
valid_sequences_sent1 = tokenizer.texts_to_sequences(valid_df[keyword_text_col].values)
word_index = tokenizer.word_index
print(f'Found {len(word_index)} unique tokens in {keyword_text_col}.')
if task == 'MULTICLASS':
X_train = keras.preprocessing.sequence.pad_sequences(train_sequences_sent1, maxlen=maxlen)
X_test = keras.preprocessing.sequence.pad_sequences(valid_sequences_sent1, maxlen=maxlen)
print(X_train.shape)
print(X_test.shape)
cols = train_df.filter(regex=keyword_cat_col).columns
Y_train = train_df.loc[:,cols].values
Y_test = valid_df.loc[:,cols].values
class_weights = Y_train.shape[0]/Y_train.sum(axis=0)
class_weights_per_datum = np.dot(Y_train, class_weights)
elif task == 'PAIRING':
raise NotImplementedError()
else:
raise ValueError(f'Unknown task {task}')
models={}
cv_scores={}
for j in range(n_rerun):
print(f"Training rerun {j}...")
sub_output_dir = output_dir / f"split_{i}_run_{j}"
print(f'...{sub_output_dir}')
if existing_run_dir is not None:
model_save_name = pathlib.Path(sub_output_dir) / "model.keras"
model_cv_results_file = pathlib.Path(sub_output_dir) / "model_history.json"
scores_file = pathlib.Path(sub_output_dir) / "metrics.json"
if model_save_name.exists() and model_cv_results_file.exists() and scores_file.exists():
print(f'...already trained for split {i} run {j}. Skipping...')
continue
else:
sub_output_dir.mkdir(parents=False, exist_ok=False)
keras_model = build_keras_model(
embedding_model=embedding_model,
task=task,
output_classes_shape=Y_train.shape[1]
)
def weighted_binary_crossentropy(y_true, y_pred, class_weights):
return K.mean(K.binary_crossentropy(tf.cast(y_true, tf.float32), y_pred) * class_weights, axis=-1)
if use_class_weight:
print('Training with class-weighted binary cross-entropy loss...')
loss = lambda y_true, y_pred: weighted_binary_crossentropy(y_true, y_pred, class_weights)
else:
print('Training with standard (unweighted) binary cross-entropy loss...')
loss = 'binary_crossentropy'
keras_model.compile("adam", loss=loss)
model, cv_score = run_keras_model_cv(
X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test,
output_dir=sub_output_dir,
model=keras_model,
random_state=j,
njobs=njobs,
)
models[j] = model
cv_scores[j] = cv_score
cv_scores_all[i] = cv_scores
models_all[i] = models
return models_all, cv_scores_all
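# Invocation sketch (paths are placeholders): data_dir must contain split_{i}/train.csv and
# split_{i}/valid.csv for i in range(n_splits), and embedding_model_path must point to a
# GloVe text file readable by load_glove_model.
#   models, scores = run_keyword_prediction_keras(
#       data_dir='data/keyword_splits',
#       output_dir='runs',
#       n_splits=5,
#       n_rerun=3,
#       task='MULTICLASS',
#       use_class_weight=True,
#       embedding_model_path='/path/to/glove.6B.100d.txt',
#   )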
def run_keyword_prediction_classic(
data_dir,
output_dir,
clf_initializer,
n_splits,
n_rerun=5,
keyword_text_col='sentence1',
label_col='label',
keyword_cat_col='cat',
task='MULTICLASS',
use_class_weight=False,
embedding_model_path=None,
njobs=None,
use_multioutput_wrapper=False,
vectorization_method='sum',
):
""" Train a model using sklearn """
saved_args = locals()
now = datetime.datetime.today().strftime('%Y-%m-%d-%H-%M-%S%f')
output_dir_parent = pathlib.Path(output_dir)
output_dir = pathlib.Path(output_dir) / f'run-glove-{task}_{now}'
output_dir.mkdir(parents=False, exist_ok=False)
embedding_model = load_glove_model(embedding_model_path)
input_df_log = output_dir / "input_data.log"
with open(input_df_log, 'w') as f:
json.dump({k: v.__name__ if callable(v) else v
for k,v in saved_args.items()}, f, indent=4)
data_dir = pathlib.Path(data_dir)
models_all = {}
cv_scores_all = {}
for i in range(0,n_splits):
suffix = '.csv'
print('--------------------------------------------------')
print(f"Training split {i}...")
train_file = data_dir / f"split_{i}" / ("train" + suffix)
print(f"Train file: {train_file}")
train_df = | pd.read_csv(train_file) | pandas.read_csv |
from influxdb import InfluxDBClient, DataFrameClient
import numpy as np
import pandas as pd
import requests
import datetime
import time
import json
import os
import sys
import logging
def readOutput():
# Set some boolean success variables for timer
success = False
## Build the path to the output folder under the current working directory
output_path = os.path.join(os.getcwd(),'output')
# Archive Path is where files are moved to after processing
archive_path = os.path.join(os.getcwd(),'archive')
#Read CSV Inventory of address names corresponding to user IDs
df_sensors = | pd.read_csv('mbientInventory.csv') | pandas.read_csv |
import asyncio
import aiofiles
import aiohttp
from aiohttp.client import ClientTimeout
import pandas as pd
import json
import numpy as np
import time
from os import listdir
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('bert-base-nli-mean-tokens')
np.set_printoptions(threshold=np.inf,suppress=True)
BASE_URL = 'http://commuter.stanford.edu:9001'
BASE_PATH = '/commuter/PopBots/NLP/Popbots-mTurk-HITS/bert-pipeline/datasets/'
DATASET_NAME = '2020-04-29-MainTurkAggregation-5-Turkers_v0_Sorted'
DATA_COLUMN = 'Input.text'
LABEL_COLUMN_RAW = 'top_label'#'Answer.Label'
MAPPING_DICT = {'Other': 0, 'Everyday Decision Making': 1, 'Work': 2, 'Social Relationships': 3, 'Financial Problem': 4, 'Emotional Turmoil': 5, 'Health, Fatigue, or Physical Pain': 6, 'School': 7, 'Family Issues': 8,'Not Stressful':9}
SCRAPED_CATEGORIES= ['Everyday Decision Making', 'Social Relationships', 'Emotional Turmoil', 'Family Issues', 'Financial Problem','Work','School','Health, Fatigue, or Physical Pain']
OUTPUT_PATH = './inquire_scraped_data/'
scraped_datasets = ["livejournal"]#,"reddit"]
START = time.monotonic()
def read_process_dataset():
df = pd.read_csv(BASE_PATH+DATASET_NAME+'.csv',sep=",")
df[LABEL_COLUMN_RAW] = df[LABEL_COLUMN_RAW].astype(str)
df[DATA_COLUMN] = df[DATA_COLUMN].apply(lambda x: x.lower())
df = df[df[LABEL_COLUMN_RAW].isin(SCRAPED_CATEGORIES)]
df_columns = ['category','mean_embedding', 'nb_sentences','source_sentences']
bootstrapped_df = pd.DataFrame(columns=df_columns)
bootstrap_number = 5
for category in SCRAPED_CATEGORIES:
nb_sentences = 38
all_mean = []
for i in range(bootstrap_number):
category_df = df[df[LABEL_COLUMN_RAW] == category].sample(n=nb_sentences)
#category_df_unsampled = df[df[LABEL_COLUMN_RAW] == category]
category_df['embedding'] = model.encode(category_df[DATA_COLUMN].values)
category_df['embedding'] = category_df['embedding'].apply(lambda x: np.array(x))
average_mean = np.mean(np.array(category_df['embedding'].values),axis=0) # vector of 768 dim
bootstrapped_df = bootstrapped_df.append({'category': category, 'mean_embedding': average_mean, 'nb_sentences': nb_sentences, 'source_sentences': list(category_df[DATA_COLUMN].values)}, ignore_index=True)
all_mean.append(average_mean)
mean_all_mean = np.mean(np.array(all_mean),axis=0)
bootstrapped_df = bootstrapped_df.append({'category': "All " + category, 'mean_embedding': mean_all_mean, 'nb_sentences': '', 'source_sentences': ''}, ignore_index=True)
return bootstrapped_df
def aggregate_json_convert_tocsv():
final_df = | pd.DataFrame() | pandas.DataFrame |
import matplotlib.pyplot as plt
import pandas as pd
df_w2v = pd.read_csv("../data/w2v_history.csv", index_col=False)
df_bert = | pd.read_csv("../data/bert_history.csv", index_col=False) | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_core.ipynb (unless otherwise specified).
__all__ = ['tokenizer', 'model', 'unmasker', 'spacifySeq', 'maskifySeq', 'allResidueCoordinates',
'allResiduePredictions', 'getTopSeq', 'residuePredictionScore', 'hasNonStandardAA']
# Cell
from transformers import BertForMaskedLM, BertTokenizer, pipeline
import pandas as pd
# Cell
tokenizer = BertTokenizer.from_pretrained("Rostlab/prot_bert", do_lower_case=False )
model = BertForMaskedLM.from_pretrained("Rostlab/prot_bert")
unmasker = pipeline('fill-mask', model=model, tokenizer=tokenizer)
# Cell
def spacifySeq(seq):
return "".join([ aa +" " for aa in seq]).strip()
# Cell
def maskifySeq(seq, pos, mask="[MASK]"):
seqList = seq.split()
seqList[pos] = mask
return "".join(aa +" " for aa in seqList).strip()
# Cell
def allResidueCoordinates(seq,residue):
return [i for i, x in enumerate(seq) if x == residue]
# Cell
def allResiduePredictions(seq):
spaceSeq = spacifySeq(seq)
posPredictions = []
for aaPos in range(len(seq)):
aa = seq[aaPos]
maskPosSeq = maskifySeq(spaceSeq, aaPos)
prediction = unmasker(maskPosSeq, top_k=30)
posPredictions.append(prediction)
return posPredictions
# Cell
def getTopSeq(allPredictions):
topSeq = ""
for aaPred in allPredictions:
topSeq += aaPred[0]["token_str"]
return topSeq
# Cell
def residuePredictionScore(allPredictions, seq):
residueScoreDict = {
"wt":list(seq),
"wtIndex":list(range(len(seq)+1))[1:],
"wtScore":[],
"A":[],
"C":[],
"D":[],
"E":[],
"F":[],
"G":[],
"H":[],
"I":[],
"K":[],
"L":[],
"M":[],
"N":[],
"P":[],
"Q":[],
"R":[],
"S":[],
"T":[],
"V":[],
"W":[],
"Y":[]
}
for aaPredPos in range(len(allPredictions)):
aaPred = allPredictions[aaPredPos]
wtAA = seq[aaPredPos]
for predRank in range(len(aaPred)):
posPred = aaPred[predRank]
predAA = posPred["token_str"]
# print(predRank, posPred["token_str"])
if predAA in residueScoreDict:
residueScoreDict[predAA].append(posPred["score"])
if predAA == wtAA:
residueScoreDict["wtScore"].append(posPred["score"])
residueScoreDF = | pd.DataFrame.from_dict(residueScoreDict) | pandas.DataFrame.from_dict |
"""
Regression_program
- train_data = 21st general election data, 788 samples
- test_data  = 21st general election data, the remaining samples beyond the 788
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import font_manager, rc
from pandas import Series
from sklearn.linear_model import Lasso, Ridge, ElasticNet
# Set the plot font to Malgun Gothic so Korean labels render correctly
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
# Training & Test Data Load
#train_data = pd.read_csv('C:/Users/khw08/Desktop/OSBAD_Project/Regression/KoNLPY_M&D_train_CSV_2.csv')
# Naver = 788, 800:6364
train_data = | pd.read_csv('C:/Users/khw08/Desktop/OSBAD_Project/Regression/KoNLPY_Youtube_M&D_train_CSV.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import multiprocessing as mp
import datetime as dt
def linParts(numAtoms,
numThreads):
"""SNIPPET 20.5 THE linParts FUNCTION
partition of atoms with a single loop
"""
parts=np.linspace(0,numAtoms,min(numThreads,numAtoms)+1)
parts=np.ceil(parts).astype(int)
return parts
def nestedParts(numAtoms,
numThreads,
upperTriang=False):
"""SNIPPET 20.6 THE nestedParts FUNCTION
Partition of atoms with an inner loop
"""
parts,numThreads_=[0],min(numThreads,numAtoms)
for num in range(numThreads_):
part=1+4*(parts[-1]**2+parts[-1]+numAtoms*(numAtoms+1.)/numThreads_)
part=(-1+part**.5)/2.
parts.append(part)
parts=np.round(parts).astype(int)
if upperTriang: # the first rows are heaviest
parts=np.cumsum(np.diff(parts)[::-1])
parts=np.append(np.array([0]),parts)
return parts
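# Example: both helpers return index boundaries into the list of atoms. With 20 atoms and
# 4 threads, linParts gives equal-sized chunks, while nestedParts shrinks the later chunks
# because later atoms do more work when an inner loop runs over all preceding atoms
# (pass upperTriang=True to reverse this when the first rows are the heavy ones):
#   linParts(20, 4)    -> array([ 0,  5, 10, 15, 20])
#   nestedParts(20, 4) -> array([ 0, 10, 14, 17, 20])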
def mpPandasObj(func,
pdObj,
numThreads=24,
mpBatches=1,
linMols=True,
**kargs):
"""SNIPPET 20.7 THE mpPandasObj, USED AT VARIOUS POINTS IN THE BOOK
Parallelize jobs, return a dataframe or series
+ func: function to be parallelized. Returns a DataFrame
+ pdObj[0]: Name of argument used to pass the molecule
+ pdObj[1]: List of atoms that will be grouped into molecules
+ kargs: any other keyword arguments needed by func
Example: df1=mpPandasObj(func,('molecule',df0.index),24,**kargs)
"""
import pandas as pd
#if linMols:parts=linParts(len(argList[1]),numThreads*mpBatches)
#else:parts=nestedParts(len(argList[1]),numThreads*mpBatches)
if linMols:parts=linParts(len(pdObj[1]),numThreads*mpBatches)
else:parts=nestedParts(len(pdObj[1]),numThreads*mpBatches)
jobs=[]
for i in range(1,len(parts)):
job={pdObj[0]:pdObj[1][parts[i-1]:parts[i]],'func':func}
job.update(kargs)
jobs.append(job)
if numThreads==1:out=processJobs_(jobs)
else: out=processJobs(jobs,numThreads=numThreads)
if isinstance(out[0],pd.DataFrame):df0=pd.DataFrame()
elif isinstance(out[0],pd.Series):df0= | pd.Series() | pandas.Series |
import os
import io
import random
import string
import re
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import nltk
from nltk import FreqDist
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
import config
EMPH_TOKEN = config.EMPH_TOKEN
CONTRAST_TOKEN = config.CONTRAST_TOKEN
CONCESSION_TOKEN = config.CONCESSION_TOKEN
# TODO: redesign the data loading so as to be object-oriented
def load_training_data(data_trainset, data_devset, input_concat=False, generate_vocab=False, skip_if_exist=True):
"""Generate source and target files in the required input format for the model training.
"""
training_source_file = os.path.join(config.DATA_DIR, 'training_source.txt')
training_target_file = os.path.join(config.DATA_DIR, 'training_target.txt')
dev_source_file = os.path.join(config.DATA_DIR, 'dev_source.txt')
dev_target_file = os.path.join(config.DATA_DIR, 'dev_target.txt')
if skip_if_exist:
# If there is an existing source and target file, skip their generation
if os.path.isfile(training_source_file) and \
os.path.isfile(training_target_file) and \
os.path.isfile(dev_source_file) and \
os.path.isfile(dev_target_file):
print('Found existing input files. Skipping their generation.')
return
dataset = init_training_data(data_trainset, data_devset)
dataset_name = dataset['dataset_name']
x_train, y_train, x_dev, y_dev = dataset['data']
_, _, slot_sep, val_sep, val_sep_end = dataset['separators']
# Preprocess the MRs and the utterances
x_train = [preprocess_mr(x, dataset['separators']) for x in x_train]
x_dev = [preprocess_mr(x, dataset['separators']) for x in x_dev]
y_train = [preprocess_utterance(y) for y in y_train]
y_dev = [preprocess_utterance(y) for y in y_dev]
# Produce sequences of extracted words from the meaning representations (MRs) in the trainset
x_train_seq = []
for i, mr in enumerate(x_train):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
slot_ctr += 1
# Delexicalize the MR and the utterance
y_train[i] = delex_sample(mr_dict, y_train[i], dataset=dataset_name, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_train_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_train_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_train_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_train_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_train_seq[i].extend([key] + val.split())
else:
x_train_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_train_seq[i].append('<STOP>')
# Produce sequences of extracted words from the meaning representations (MRs) in the devset
x_dev_seq = []
for i, mr in enumerate(x_dev):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
slot_ctr += 1
# Delexicalize the MR and the utterance
y_dev[i] = delex_sample(mr_dict, y_dev[i], dataset=dataset_name, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_dev_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_dev_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_dev_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_dev_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_dev_seq[i].extend([key] + val.split())
else:
x_dev_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_dev_seq[i].append('<STOP>')
y_train_seq = [word_tokenize(y) for y in y_train]
y_dev_seq = [word_tokenize(y) for y in y_dev]
# Generate a vocabulary file if necessary
if generate_vocab:
generate_vocab_file(np.concatenate(x_train_seq + x_dev_seq + y_train_seq + y_dev_seq),
vocab_filename='vocab.lang_gen.tokens')
# generate_vocab_file(np.concatenate(x_train_seq + x_dev_seq),
# vocab_filename='vocab.lang_gen_multi_vocab.source')
# generate_vocab_file(np.concatenate(y_train_seq + y_dev_seq),
# vocab_filename='vocab.lang_gen_multi_vocab.target')
with io.open(training_source_file, 'w', encoding='utf8') as f_x_train:
for line in x_train_seq:
f_x_train.write('{}\n'.format(' '.join(line)))
with io.open(training_target_file, 'w', encoding='utf8') as f_y_train:
for line in y_train:
f_y_train.write(line + '\n')
with io.open(dev_source_file, 'w', encoding='utf8') as f_x_dev:
for line in x_dev_seq:
f_x_dev.write('{}\n'.format(' '.join(line)))
with io.open(dev_target_file, 'w', encoding='utf8') as f_y_dev:
for line in y_dev:
f_y_dev.write(line + '\n')
return np.concatenate(x_train_seq + x_dev_seq + y_train_seq + y_dev_seq).flatten()
def load_test_data(data_testset, input_concat=False):
"""Generate source and target files in the required input format for the model testing.
"""
test_source_file = os.path.join(config.DATA_DIR, 'test_source.txt')
test_source_dict_file = os.path.join(config.DATA_DIR, 'test_source_dict.json')
test_target_file = os.path.join(config.DATA_DIR, 'test_target.txt')
test_reference_file = os.path.join(config.METRICS_DIR, 'test_references.txt')
dataset = init_test_data(data_testset)
dataset_name = dataset['dataset_name']
x_test, y_test = dataset['data']
_, _, slot_sep, val_sep, val_sep_end = dataset['separators']
# Preprocess the MRs
x_test = [preprocess_mr(x, dataset['separators']) for x in x_test]
# Produce sequences of extracted words from the meaning representations (MRs) in the testset
x_test_seq = []
x_test_dict = []
for i, mr in enumerate(x_test):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
mr_dict_cased = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
mr_dict_cased[slot] = value_orig
slot_ctr += 1
# Build an MR dictionary with original values
x_test_dict.append(mr_dict_cased)
# Delexicalize the MR
delex_sample(mr_dict, dataset=dataset_name, mr_only=True, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_test_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_test_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_test_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_test_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_test_seq[i].extend([key] + val.split())
else:
x_test_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_test_seq[i].append('<STOP>')
with io.open(test_source_file, 'w', encoding='utf8') as f_x_test:
for line in x_test_seq:
f_x_test.write('{}\n'.format(' '.join(line)))
with io.open(test_source_dict_file, 'w', encoding='utf8') as f_x_test_dict:
json.dump(x_test_dict, f_x_test_dict)
if len(y_test) > 0:
with io.open(test_target_file, 'w', encoding='utf8') as f_y_test:
for line in y_test:
f_y_test.write(line + '\n')
# Reference file for calculating metrics for test predictions
with io.open(test_reference_file, 'w', encoding='utf8') as f_y_test:
for i, line in enumerate(y_test):
if i > 0 and x_test[i] != x_test[i - 1]:
f_y_test.write('\n')
f_y_test.write(line + '\n')
def generate_vocab_file(token_sequences, vocab_filename, vocab_size=10000):
vocab_file = os.path.join(config.DATA_DIR, vocab_filename)
distr = FreqDist(token_sequences)
vocab = distr.most_common(min(len(distr), vocab_size - 3)) # cap the vocabulary size
vocab_with_reserved_tokens = ['<pad>', '<EOS>'] + list(map(lambda tup: tup[0], vocab)) + ['UNK']
with io.open(vocab_file, 'w', encoding='utf8') as f_vocab:
for token in vocab_with_reserved_tokens:
f_vocab.write('{}\n'.format(token))
def get_vocabulary(token_sequences, vocab_size=10000):
distr = FreqDist(token_sequences)
vocab = distr.most_common(min(len(distr), vocab_size)) # cap the vocabulary size
vocab_set = set(map(lambda tup: tup[0], vocab))
return vocab_set
# TODO: generalize and utilize in the loading functions
def tokenize_mr(mr):
"""Produces a (delexicalized) sequence of tokens from the input MR.
Method used in the client to preprocess a single MR that is sent to the service for utterance generation.
"""
slot_sep = ','
val_sep = '['
val_sep_end = ']'
mr_seq = []
slot_ctr = 0
emph_idxs = set()
mr_dict = OrderedDict()
mr_dict_cased = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
mr_dict_cased[slot] = value_orig
slot_ctr += 1
# Delexicalize the MR
delex_sample(mr_dict, mr_only=True)
slot_ctr = 0
# Convert the dictionary to a list
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
mr_seq.append(EMPH_TOKEN)
if len(val) > 0:
mr_seq.extend([key] + val.split())
else:
mr_seq.append(key)
slot_ctr += 1
return mr_seq, mr_dict_cased
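# Usage sketch for tokenize_mr (illustrative; the exact tokens depend on
# parse_slot_and_value and delex_sample, which are defined elsewhere in this module):
#   mr_seq, mr_dict_cased = tokenize_mr('name[The Eagle], eatType[coffee shop], food[French]')
#   mr_seq        -> list of slot-name and (delexicalized) value tokens fed to the model
#   mr_dict_cased -> OrderedDict mapping each slot to its original, cased value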
def load_training_data_for_eval(data_trainset, data_model_outputs_train, vocab_size, max_input_seq_len, max_output_seq_len, delex=False):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_trainset or '\\rest_e2e\\' in data_trainset:
x_train, y_train_1 = read_rest_e2e_dataset_train(data_trainset)
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_trainset or '\\tv\\' in data_trainset:
x_train, y_train_1, y_train_2 = read_tv_dataset_train(data_trainset)
if data_model_outputs_train is not None:
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_trainset or '\\laptop\\' in data_trainset:
x_train, y_train_1, y_train_2 = read_laptop_dataset_train(data_trainset)
if data_model_outputs_train is not None:
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_train_1 = [preprocess_utterance(y) for y in y_train_1]
y_train_2 = [preprocess_utterance(y) for y in y_train_2]
# produce sequences of extracted words from the meaning representations (MRs) in the trainset
x_train_seq = []
for i, mr in enumerate(x_train):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex == True:
# delexicalize the MR and the utterance
y_train_1[i] = delex_sample(mr_dict, y_train_1[i], dataset=dataset_name, utterance_only=True)
y_train_2[i] = delex_sample(mr_dict, y_train_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_train_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_train_seq[i].extend([key, val])
else:
x_train_seq[i].append(key)
# create source vocabulary
if os.path.isfile('data/eval_vocab_source.json'):
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
else:
x_distr = FreqDist([x_token for x in x_train_seq for x_token in x])
x_vocab = x_distr.most_common(min(len(x_distr), vocab_size - 2)) # cap the vocabulary size
with io.open('data/eval_vocab_source.json', 'w', encoding='utf8') as f_x_vocab:
json.dump(x_vocab, f_x_vocab, ensure_ascii=False)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# create target vocabulary
if os.path.isfile('data/eval_vocab_target.json'):
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
else:
y_distr = FreqDist([y_token for y in y_train_1 for y_token in y] + [y_token for y in y_train_2 for y_token in y])
y_vocab = y_distr.most_common(min(len(y_distr), vocab_size - 2)) # cap the vocabulary size
with io.open('data/eval_vocab_target.json', 'w', encoding='utf8') as f_y_vocab:
json.dump(y_vocab, f_y_vocab, ensure_ascii=False)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the training set
x_train_enc = token_seq_to_idx_seq(x_train_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the training set
y_train_1_enc = token_seq_to_idx_seq(y_train_1, y_word2idx, max_output_seq_len)
# produce sequences of indexes from the utterances in the training set
y_train_2_enc = token_seq_to_idx_seq(y_train_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the training set
labels_train = np.concatenate((np.ones(len(y_train_1_enc)), np.zeros(len(y_train_2_enc))))
return (np.concatenate((np.array(x_train_enc), np.array(x_train_enc))),
np.concatenate((np.array(y_train_1_enc), np.array(y_train_2_enc))),
labels_train)
def load_dev_data_for_eval(data_devset, data_model_outputs_dev, vocab_size, max_input_seq_len, max_output_seq_len, delex=True):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_devset or '\\rest_e2e\\' in data_devset:
x_dev, y_dev_1 = read_rest_e2e_dataset_dev(data_devset)
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_devset or '\\tv\\' in data_devset:
x_dev, y_dev_1, y_dev_2 = read_tv_dataset_dev(data_devset)
if data_model_outputs_dev is not None:
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_devset or '\\laptop\\' in data_devset:
x_dev, y_dev_1, y_dev_2 = read_laptop_dataset_dev(data_devset)
if data_model_outputs_dev is not None:
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_dev_1 = [preprocess_utterance(y) for y in y_dev_1]
y_dev_2 = [preprocess_utterance(y) for y in y_dev_2]
# produce sequences of extracted words from the meaning representations (MRs) in the devset
x_dev_seq = []
for i, mr in enumerate(x_dev):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex == True:
# delexicalize the MR and the utterance
y_dev_1[i] = delex_sample(mr_dict, y_dev_1[i], dataset=dataset_name, utterance_only=True)
y_dev_2[i] = delex_sample(mr_dict, y_dev_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_dev_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_dev_seq[i].extend([key, val])
else:
x_dev_seq[i].append(key)
# load the source vocabulary
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# load the target vocabulary
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the devset
x_dev_enc = token_seq_to_idx_seq(x_dev_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the devset
y_dev_1_enc = token_seq_to_idx_seq(y_dev_1, y_word2idx, max_output_seq_len)
# produce sequences of indexes from the utterances in the devset
y_dev_2_enc = token_seq_to_idx_seq(y_dev_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the devset
labels_dev = np.concatenate((np.ones(len(y_dev_1_enc)), np.zeros(len(y_dev_2_enc))))
return (np.concatenate((np.array(x_dev_enc), np.array(x_dev_enc))),
np.concatenate((np.array(y_dev_1_enc), np.array(y_dev_2_enc))),
labels_dev)
def load_test_data_for_eval(data_testset, data_model_outputs_test, vocab_size, max_input_seq_len, max_output_seq_len, delex=False):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_testset or '\\rest_e2e\\' in data_testset:
x_test, _ = read_rest_e2e_dataset_test(data_testset)
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_testset or '\\tv\\' in data_testset:
x_test, _, y_test = read_tv_dataset_test(data_testset)
if data_model_outputs_test is not None:
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_testset or '\\laptop\\' in data_testset:
x_test, _, y_test = read_laptop_dataset_test(data_testset)
if data_model_outputs_test is not None:
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_test = [preprocess_utterance(y) for y in y_test]
#y_test_1 = [preprocess_utterance(y) for y in y_test_1]
#y_test_2 = [preprocess_utterance(y) for y in y_test_2]
# produce sequences of extracted words from the meaning representations (MRs) in the testset
x_test_seq = []
for i, mr in enumerate(x_test):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex == True:
# delexicalize the MR and the utterance
y_test[i] = delex_sample(mr_dict, y_test[i], dataset=dataset_name)
#y_test_1[i] = delex_sample(mr_dict, y_test_1[i], dataset=dataset_name, utterance_only=True)
#y_test_2[i] = delex_sample(mr_dict, y_test_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_test_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_test_seq[i].extend([key, val])
else:
x_test_seq[i].append(key)
# load the source vocabulary
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# load the target vocabulary
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the test set
x_test_enc = token_seq_to_idx_seq(x_test_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the test set
y_test_enc = token_seq_to_idx_seq(y_test, y_word2idx, max_output_seq_len)
#y_test_1_enc = token_seq_to_idx_seq(y_test_1, y_word2idx, max_output_seq_len)
#y_test_2_enc = token_seq_to_idx_seq(y_test_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the test set
labels_test = np.ones(len(y_test_enc))
#labels_test = np.concatenate((np.ones(len(y_test_1_enc)), np.zeros(len(y_test_2_enc))))
return (np.array(x_test_enc),
np.array(y_test_enc),
labels_test,
x_idx2word,
y_idx2word)
#return (np.concatenate((np.array(x_test_enc), np.array(x_test_enc))),
# np.concatenate((np.array(y_test_1_enc), np.array(y_test_2_enc))),
# labels_test,
# x_idx2word,
# y_idx2word)
# ---- AUXILIARY FUNCTIONS ----
def init_training_data(data_trainset, data_devset):
if 'rest_e2e' in data_trainset and 'rest_e2e' in data_devset:
x_train, y_train = read_rest_e2e_dataset_train(data_trainset)
x_dev, y_dev = read_rest_e2e_dataset_dev(data_devset)
dataset_name = 'rest_e2e'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'video_game' in data_trainset and 'video_game' in data_devset:
x_train, y_train = read_video_game_dataset_train(data_trainset)
x_dev, y_dev = read_video_game_dataset_dev(data_devset)
dataset_name = 'video_game'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'tv' in data_trainset and 'tv' in data_devset:
x_train, y_train, _ = read_tv_dataset_train(data_trainset)
x_dev, y_dev, _ = read_tv_dataset_dev(data_devset)
dataset_name = 'tv'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'laptop' in data_trainset and 'laptop' in data_devset:
x_train, y_train, _ = read_laptop_dataset_train(data_trainset)
x_dev, y_dev, _ = read_laptop_dataset_dev(data_devset)
dataset_name = 'laptop'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'hotel' in data_trainset and 'hotel' in data_devset:
x_train, y_train, _ = read_hotel_dataset_train(data_trainset)
x_dev, y_dev, _ = read_hotel_dataset_dev(data_devset)
dataset_name = 'hotel'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
else:
raise ValueError('Unexpected file name or path: {0}, {1}'.format(data_trainset, data_devset))
return {
'dataset_name': dataset_name,
'data': (x_train, y_train, x_dev, y_dev),
'separators': (da_sep, da_sep_end, slot_sep, val_sep, val_sep_end)
}
def init_test_data(data_testset):
if 'rest_e2e' in data_testset:
x_test, y_test = read_rest_e2e_dataset_test(data_testset)
dataset_name = 'rest_e2e'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'video_game' in data_testset:
x_test, y_test = read_video_game_dataset_test(data_testset)
dataset_name = 'video_game'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'tv' in data_testset:
x_test, y_test, _ = read_tv_dataset_test(data_testset)
dataset_name = 'tv'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'laptop' in data_testset:
x_test, y_test, _ = read_laptop_dataset_test(data_testset)
dataset_name = 'laptop'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'hotel' in data_testset:
x_test, y_test, _ = read_hotel_dataset_test(data_testset)
dataset_name = 'hotel'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
else:
raise ValueError('Unexpected file name or path: {0}'.format(data_testset))
return {
'dataset_name': dataset_name,
'data': (x_test, y_test),
'separators': (da_sep, da_sep_end, slot_sep, val_sep, val_sep_end)
}
def read_rest_e2e_dataset_train(data_trainset):
# read the training data from file
df_train = pd.read_csv(data_trainset, header=0, encoding='utf8') # names=['mr', 'ref']
x_train = df_train.mr.tolist()
y_train = df_train.ref.tolist()
return x_train, y_train
def read_rest_e2e_dataset_dev(data_devset):
# read the development data from file
df_dev = pd.read_csv(data_devset, header=0, encoding='utf8') # names=['mr', 'ref']
x_dev = df_dev.mr.tolist()
y_dev = df_dev.ref.tolist()
return x_dev, y_dev
def read_rest_e2e_dataset_test(data_testset):
# read the test data from file
df_test = pd.read_csv(data_testset, header=0, encoding='utf8') # names=['mr', 'ref']
x_test = df_test.iloc[:, 0].tolist()
y_test = []
if df_test.shape[1] > 1:
y_test = df_test.iloc[:, 1].tolist()
return x_test, y_test
def read_video_game_dataset_train(data_trainset):
# read the training data from file
df_train = pd.read_csv(data_trainset, header=0, encoding='utf8') # names=['mr', 'ref']
x_train = df_train.mr.tolist()
y_train = df_train.ref.tolist()
return x_train, y_train
def read_video_game_dataset_dev(data_devset):
# read the development data from file
df_dev = pd.read_csv(data_devset, header=0, encoding='utf8') # names=['mr', 'ref']
x_dev = df_dev.mr.tolist()
y_dev = df_dev.ref.tolist()
return x_dev, y_dev
def read_video_game_dataset_test(data_testset):
# read the test data from file
df_test = pd.read_csv(data_testset, header=0, encoding='utf8') # names=['mr', 'ref']
x_test = df_test.iloc[:, 0].tolist()
y_test = []
if df_test.shape[1] > 1:
y_test = df_test.iloc[:, 1].tolist()
return x_test, y_test
def read_tv_dataset_train(path_to_trainset):
with io.open(path_to_trainset, encoding='utf8') as f_trainset:
# Skip the comment block at the beginning of the file
f_trainset, _ = skip_comment_block(f_trainset, '#')
# read the training data from file
df_train = pd.read_json(f_trainset, encoding='utf8')
x_train = df_train.iloc[:, 0].tolist()
y_train = df_train.iloc[:, 1].tolist()
y_train_alt = df_train.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_train):
x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
# convert plural nouns to "[noun] -s" or "[noun] -es" form
for i, utt in enumerate(y_train):
y_train[i] = replace_plural_nouns(utt)
for i, utt in enumerate(y_train_alt):
y_train_alt[i] = replace_plural_nouns(utt)
return x_train, y_train, y_train_alt
def read_tv_dataset_dev(path_to_devset):
with io.open(path_to_devset, encoding='utf8') as f_devset:
# Skip the comment block at the beginning of the file
f_devset, _ = skip_comment_block(f_devset, '#')
# read the development data from file
df_dev = pd.read_json(f_devset, encoding='utf8')
x_dev = df_dev.iloc[:, 0].tolist()
y_dev = df_dev.iloc[:, 1].tolist()
y_dev_alt = df_dev.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_dev):
x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
# convert plural nouns to "[noun] -s" or "[noun] -es" form
for i, utt in enumerate(y_dev):
y_dev[i] = replace_plural_nouns(utt)
for i, utt in enumerate(y_dev_alt):
y_dev_alt[i] = replace_plural_nouns(utt)
return x_dev, y_dev, y_dev_alt
def read_tv_dataset_test(path_to_testset):
with io.open(path_to_testset, encoding='utf8') as f_testset:
# Skip the comment block at the beginning of the file
f_testset, _ = skip_comment_block(f_testset, '#')
# read the test data from file
df_test = pd.read_json(f_testset, encoding='utf8')
x_test = df_test.iloc[:, 0].tolist()
y_test = df_test.iloc[:, 1].tolist()
y_test_alt = df_test.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_test):
x_test[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_test, y_test, y_test_alt
def read_laptop_dataset_train(path_to_trainset):
with io.open(path_to_trainset, encoding='utf8') as f_trainset:
# Skip the comment block at the beginning of the file
f_trainset, _ = skip_comment_block(f_trainset, '#')
# read the training data from file
df_train = pd.read_json(f_trainset, encoding='utf8')
x_train = df_train.iloc[:, 0].tolist()
y_train = df_train.iloc[:, 1].tolist()
y_train_alt = df_train.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_train):
x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_train, y_train, y_train_alt
def read_laptop_dataset_dev(path_to_devset):
with io.open(path_to_devset, encoding='utf8') as f_devset:
# Skip the comment block at the beginning of the file
f_devset, _ = skip_comment_block(f_devset, '#')
# read the development data from file
df_dev = pd.read_json(f_devset, encoding='utf8')
x_dev = df_dev.iloc[:, 0].tolist()
y_dev = df_dev.iloc[:, 1].tolist()
y_dev_alt = df_dev.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_dev):
x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_dev, y_dev, y_dev_alt
def read_laptop_dataset_test(path_to_testset):
with io.open(path_to_testset, encoding='utf8') as f_testset:
# Skip the comment block at the beginning of the file
f_testset, _ = skip_comment_block(f_testset, '#')
# read the test data from file
df_test = pd.read_json(f_testset, encoding='utf8')
x_test = df_test.iloc[:, 0].tolist()
y_test = df_test.iloc[:, 1].tolist()
y_test_alt = df_test.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_test):
x_test[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_test, y_test, y_test_alt
def read_hotel_dataset_train(path_to_trainset):
with io.open(path_to_trainset, encoding='utf8') as f_trainset:
# Skip the comment block at the beginning of the file
f_trainset, _ = skip_comment_block(f_trainset, '#')
# read the training data from file
df_train = pd.read_json(f_trainset, encoding='utf8')
x_train = df_train.iloc[:, 0].tolist()
y_train = df_train.iloc[:, 1].tolist()
y_train_alt = df_train.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_train):
x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_train, y_train, y_train_alt
def read_hotel_dataset_dev(path_to_devset):
with io.open(path_to_devset, encoding='utf8') as f_devset:
# Skip the comment block at the beginning of the file
f_devset, _ = skip_comment_block(f_devset, '#')
# read the development data from file
df_dev = pd.read_json(f_devset, encoding='utf8')
x_dev = df_dev.iloc[:, 0].tolist()
y_dev = df_dev.iloc[:, 1].tolist()
y_dev_alt = df_dev.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_dev):
x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_dev, y_dev, y_dev_alt
def read_hotel_dataset_test(path_to_testset):
with io.open(path_to_testset, encoding='utf8') as f_testset:
# Skip the comment block at the beginning of the file
f_testset, _ = skip_comment_block(f_testset, '#')
# read the test data from file
df_test = | pd.read_json(f_testset, encoding='utf8') | pandas.read_json |
import itertools
import os
import pandas as pd
from tqdm import tqdm
# set directory
git_dir = os.path.expanduser("~/git/prism-4-paper")
os.chdir(git_dir)
# import functions
from functions import clean_mols, get_ecfp6_fingerprints, get_tanimoto
flatten = lambda x: list(itertools.chain.from_iterable(x))
# read true and predicted structures
tc = pd.read_csv(git_dir + '/data/analysis/titanium/tanimoto_coefficients.csv.gz')
## prefilter missing Tcs
tc = tc.dropna(subset=['pred_smiles'])
# get unique clusters and methods
clusters = tc['cluster'].unique()
# set up output data frame
mism = | pd.DataFrame() | pandas.DataFrame |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
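# Illustrative call (sids and dates arbitrary): each sid's estimate becomes known on its
# knowledge_date and is forward-filled through the daily index, with `at_date` pinned to
# end_date for every row:
#   expected = create_expected_df_for_factor_compute(
#       start_date=pd.Timestamp('2015-01-05'),
#       sids=[0, 1],
#       tuples=[(0, 100.0, pd.Timestamp('2015-01-05')),
#               (1, 200.0, pd.Timestamp('2015-01-07'))],
#       end_date=pd.Timestamp('2015-01-09'),
#   )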
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_load_one_day()
Tests that loading a dataset with multiple estimate columns returns the
expected value for every column on a single day.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(axis=1), self.expected_out.sort_index(axis=1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that the previous-quarter loader returns the expected estimates
for a single day when the dataset has multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that the next-quarter loader returns the expected estimates
for a single day when the dataset has multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
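# Illustrative sketch (not part of the original test suite): how the
# permutation-and-filter approach in WithEstimatesTimeZero.make_events above
# enumerates orderings of two knowledge dates per quarter. Only orderings in
# which each quarter's estimates are chronological and Q1's estimates precede
# Q1's release survive; each surviving ordering is what gets assigned to its
# own sid. The names and the shortened date lists here are hypothetical.
def _sketch_valid_knowledge_orderings():
    import itertools

    import pandas as pd

    q1_dates = [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-04")]
    q2_dates = [pd.Timestamp("2015-01-14"), pd.Timestamp("2015-01-17")]
    q1_release = pd.Timestamp("2015-01-13")
    valid = []
    for q1e1, q1e2, q2e1, q2e2 in itertools.permutations(q1_dates + q2_dates, 4):
        # Estimates must be in order within each quarter, and Q1's estimates
        # must arrive before Q1's release.
        if q1e1 < q1e2 and q2e1 < q2e2 and q1e2 < q1_release:
            valid.append((q1e1, q1e2, q2e1, q2e2))
    return valid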
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
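# Illustrative sketch (not part of the original test suite): the selection rule
# that the two get_expected_estimate implementations above model, written as
# plain pandas operations on a tiny knowledge frame (rows assumed sorted by
# event date and already filtered to what is known as of `asof`). The "next"
# estimate is the earliest event still on/after `asof`; the "previous" estimate
# is the latest event already on/before `asof`. Column names are hypothetical.
def _sketch_next_previous_selection():
    import pandas as pd

    knowledge = pd.DataFrame(
        {
            "event_date": [pd.Timestamp("2015-01-10"), pd.Timestamp("2015-01-20")],
            "estimate": [1.0, 2.0],
        }
    )
    asof = pd.Timestamp("2015-01-15")
    upcoming = knowledge[knowledge["event_date"] >= asof]
    past = knowledge[knowledge["event_date"] <= asof]
    next_estimate = upcoming.iloc[:1]  # event on 2015-01-20 -> estimate 2.0
    previous_estimate = past.iloc[-1:]  # event on 2015-01-10 -> estimate 1.0
    return next_estimate, previous_estimate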
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number of
        quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
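# Illustrative sketch (not part of the original test suite): the pattern used
# by make_expected_out above -- build an empty frame whose column names carry a
# "1"/"2" quarter suffix, then coerce each column to the dtype of the
# underlying dataset column (datetime columns via pd.to_datetime, everything
# else via astype). The column names and dtypes below are hypothetical.
def _sketch_suffixed_expected_columns():
    import itertools

    import numpy as np
    import pandas as pd

    base_columns = {"event_date": "datetime64[ns]", "estimate": np.float64}
    index = pd.date_range("2015-01-01", periods=3)
    expected = pd.DataFrame(
        columns=[name + suffix for name in base_columns for suffix in ("1", "2")],
        index=index,
    )
    for (name, dtype), suffix in itertools.product(base_columns.items(), ("1", "2")):
        col = name + suffix
        if dtype == "datetime64[ns]":
            expected[col] = pd.to_datetime(expected[col])
        else:
            expected[col] = expected[col].astype(dtype)
    return expected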
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
        # Fill columns for 1 Q out
        for raw_name in cls.columns.values():
            expected.loc[
                pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
                    "2015-01-19", tz="UTC"
                ),
                raw_name + "1",
            ] = cls.events[raw_name].iloc[0]
            expected.loc[
                pd.Timestamp("2015-01-20", tz="UTC") :, raw_name + "1"
            ] = cls.events[raw_name].iloc[1]
        # Fill columns for 2 Q out
        for col_name in ["estimate", "event_date"]:
            expected.loc[
                pd.Timestamp("2015-01-20", tz="UTC") :, col_name + "2"
            ] = cls.events[col_name].iloc[0]
        expected.loc[
            pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
            FISCAL_QUARTER_FIELD_NAME + "2",
        ] = 4
        expected.loc[
            pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
            FISCAL_YEAR_FIELD_NAME + "2",
        ] = 2014
        expected.loc[
            pd.Timestamp("2015-01-20", tz="UTC") :, FISCAL_QUARTER_FIELD_NAME + "2"
        ] = 1
        expected.loc[
            pd.Timestamp("2015-01-20", tz="UTC") :, FISCAL_YEAR_FIELD_NAME + "2"
        ] = 2015
        return expected
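# Illustrative sketch (not part of the original test suite): the fill pattern
# used by fill_expected_out in the two classes above relies on label-based
# .loc slicing over a DatetimeIndex, which -- unlike positional slicing -- is
# inclusive of both endpoints, and on open-ended slices like .loc[ts:] to fill
# everything from a date onward. The column name and values are hypothetical.
def _sketch_loc_datetime_slicing():
    import pandas as pd

    frame = pd.DataFrame(
        {"estimate1": float("nan")},
        index=pd.date_range("2015-01-12", "2015-01-22", tz="UTC"),
    )
    # Both endpoints are included: 2015-01-12 through 2015-01-19 get 1.0.
    frame.loc[
        pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-19", tz="UTC"),
        "estimate1",
    ] = 1.0
    # Open-ended slice: everything from 2015-01-20 onward gets 2.0.
    frame.loc[pd.Timestamp("2015-01-20", tz="UTC") :, "estimate1"] = 2.0
    return frame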
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
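# Illustrative sketch (not part of the original test suite): the window-length
# arithmetic used by test_estimate_windows_at_quarter_boundaries above. The
# lookback window spans from the fixed window start through the simulation
# start date (inclusive), measured in positions on the trading calendar, and
# the expected timeline is then cut to the trailing `window_len` rows ending at
# `today`. The plain business-day calendar below is purely for illustration.
def _sketch_window_length_from_calendar():
    import pandas as pd

    trading_days = pd.bdate_range("2015-01-05", "2015-02-10", tz="UTC")
    window_start = pd.Timestamp("2015-01-05", tz="UTC")
    start_date = pd.Timestamp("2015-01-20", tz="UTC")
    today = pd.Timestamp("2015-01-26", tz="UTC")
    window_len = (
        trading_days.get_loc(start_date) - trading_days.get_loc(window_start) + 1
    )
    today_idx = trading_days.get_loc(today)
    timeline = pd.Series(range(len(trading_days)), index=trading_days)
    # Trailing window of `window_len` sessions ending at (and including) `today`.
    window = timeline.iloc[today_idx + 1 - window_len : today_idx + 1]
    return window_len, window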
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
            # We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
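# Illustrative sketch (not part of the original test suite): a simplified
# version of the split bookkeeping that the expected timelines below
# hand-compute (e.g. 140.0 * 13 * 14 for sid 40). An estimate recorded before a
# split's effective date has to be scaled by that split's ratio to be
# comparable with post-split values, so the adjustment applied on a given view
# date is the product of the ratios of all splits effective after the
# estimate's timestamp and on/before the view date. The real loaders also
# un-apply and re-apply splits around the split-adjusted-asof date, which this
# sketch deliberately ignores.
def _sketch_cumulative_split_ratio():
    import pandas as pd

    splits = pd.DataFrame(
        {
            "ratio": [13.0, 14.0],
            "effective_date": [pd.Timestamp("2015-01-20"), pd.Timestamp("2015-01-22")],
        }
    )
    estimate_ts = pd.Timestamp("2015-01-09")
    view_date = pd.Timestamp("2015-01-23")
    applicable = splits[
        (splits["effective_date"] > estimate_ts)
        & (splits["effective_date"] <= view_date)
    ]
    return 140.0 * applicable["ratio"].prod()  # 140.0 * 13 * 14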
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, | pd.Timestamp("2015-01-20") | pandas.Timestamp |
import argparse
import webbrowser
from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
from helper_funcs import *
# ------------------------------------------------ HIGH_SHORT_INTEREST -------------------------------------------------
def high_short_interest(l_args):
parser = argparse.ArgumentParser(prog='high_short',
description='''Print top stocks being more heavily shorted. HighShortInterest.com provides
a convenient sorted database of stocks which have a short interest of over
20 percent. Additional key data such as the float, number of outstanding shares,
and company industry is displayed. Data is presented for the Nasdaq Stock Market,
the New York Stock Exchange, and the American Stock Exchange.
[Source: www.highshortinterest.com]''')
parser.add_argument('-n', "--num", action="store", dest="n_num", type=check_positive, default=10,
help='Number of top stocks to print.')
try:
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
except SystemExit:
print("")
return
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}")
url_high_short_interested_stocks = f"https://www.highshortinterest.com"
text_soup_high_short_interested_stocks = BeautifulSoup(requests.get(url_high_short_interested_stocks).text, "lxml")
a_high_short_interest_header = list()
for high_short_interest_header in text_soup_high_short_interested_stocks.findAll('td', {'class': 'tblhdr'}):
a_high_short_interest_header.append(high_short_interest_header.text.strip('\n').split('\n')[0])
df_high_short_interest = pd.DataFrame(columns=a_high_short_interest_header)
df_high_short_interest.loc[0] = ['', '', '', '', '', '', '']
a_high_short_interested_stocks = re.sub('<!--.*?//-->','', text_soup_high_short_interested_stocks.find_all('td')[2].text, flags=re.DOTALL).split('\n')[2:]
a_high_short_interested_stocks[0] = a_high_short_interested_stocks[0].replace('TickerCompanyExchangeShortIntFloatOutstdIndustry','')
l_stock_info = list()
for elem in a_high_short_interested_stocks:
        if elem == '':
continue
l_stock_info.append(elem)
if len(l_stock_info) == 7:
df_high_short_interest.loc[len(df_high_short_interest.index)] = l_stock_info
l_stock_info = list()
pd.set_option('display.max_colwidth', -1)
print(df_high_short_interest.head(n=ns_parser.n_num).to_string(index=False))
print("")
# ---------------------------------------------------- LOW_FLOAT -----------------------------------------------------
def low_float(l_args):
parser = argparse.ArgumentParser(prog='low_float',
description='''Print top stocks with lowest float. LowFloat.com provides a convenient
sorted database of stocks which have a float of under 10 million shares. Additional key
data such as the number of outstanding shares, short interest, and company industry is
displayed. Data is presented for the Nasdaq Stock Market, the New York Stock Exchange,
the American Stock Exchange, and the Over the Counter Bulletin Board.
[Source: www.lowfloat.com]''')
parser.add_argument('-n', "--num", action="store", dest="n_num", type=check_positive, default=10, help='Number of top stocks to print.')
try:
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
except SystemExit:
print("")
return
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}")
url_high_short_interested_stocks = f"https://www.lowfloat.com"
text_soup_low_float_stocks = BeautifulSoup(requests.get(url_high_short_interested_stocks).text, "lxml")
a_low_float_header = list()
for low_float_header in text_soup_low_float_stocks.findAll('td', {'class': 'tblhdr'}):
a_low_float_header.append(low_float_header.text.strip('\n').split('\n')[0])
df_low_float = pd.DataFrame(columns=a_low_float_header)
df_low_float.loc[0] = ['', '', '', '', '', '', '']
a_low_float_stocks = re.sub('<!--.*?//-->','', text_soup_low_float_stocks.find_all('td')[2].text, flags=re.DOTALL).split('\n')[2:]
a_low_float_stocks[0] = a_low_float_stocks[0].replace('TickerCompanyExchangeFloatOutstdShortIntIndustry','')
l_stock_info = list()
for elem in a_low_float_stocks:
        if elem == '':
continue
l_stock_info.append(elem)
if len(l_stock_info) == 7:
df_low_float.loc[len(df_low_float.index)] = l_stock_info
l_stock_info = list()
| pd.set_option('display.max_colwidth', -1) | pandas.set_option |
"""Example script for testing / validating the electric grid power flow solution."""
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt # TODO: Remove matplotlib dependency.
import os
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import mesmo
def main():
# Settings.
scenario_name = mesmo.config.config['tests']['scenario_name']
results_path = mesmo.utils.get_results_path(__file__, scenario_name)
power_multipliers = np.arange(-0.2, 1.2, 0.1)
# Recreate / overwrite database, to incorporate changes in the CSV files.
mesmo.data_interface.recreate_database()
# Obtain base scaling parameters.
scenario_data = mesmo.data_interface.ScenarioData(scenario_name)
base_power = scenario_data.scenario.at['base_apparent_power']
base_voltage = scenario_data.scenario.at['base_voltage']
# Obtain electric grid models.
electric_grid_model_default = mesmo.electric_grid_models.ElectricGridModelDefault(scenario_name)
electric_grid_model_opendss = mesmo.electric_grid_models.ElectricGridModelOpenDSS(scenario_name)
# Obtain nominal power flow solutions.
power_flow_solution_mesmo_nominal = mesmo.electric_grid_models.PowerFlowSolutionFixedPoint(electric_grid_model_default)
power_flow_solution_opendss_nominal = mesmo.electric_grid_models.PowerFlowSolutionOpenDSS(electric_grid_model_opendss)
# Instantiate results variables.
der_power_vector = (
pd.DataFrame(index=power_multipliers, columns=electric_grid_model_default.ders, dtype=float)
)
node_voltage_vector_mesmo = (
pd.DataFrame(index=power_multipliers, columns=electric_grid_model_default.nodes, dtype=complex)
)
node_voltage_vector_opendss = (
pd.DataFrame(index=power_multipliers, columns=electric_grid_model_default.nodes, dtype=complex)
)
node_voltage_vector_magnitude_mesmo = (
| pd.DataFrame(index=power_multipliers, columns=electric_grid_model_default.nodes, dtype=float) | pandas.DataFrame |
import more_itertools as mit
import peakutils
import warnings
import traceback
import numpy as np
import pandas as pd
import pickle
import logging
import os
import glob
import sys
from tqdm.auto import tqdm
from ceciestunepipe.util.sound import spectral as sp
from ceciestunepipe.util.sound import temporal as st
logger = logging.getLogger('ceciestunepipe.util.sound.boutsearch')
def gimmepower(x, hparams):
s = sp.rosa_spectrogram(x.flatten(), hparams)
f = np.arange(hparams['num_freq'])/hparams['num_freq']*0.5*hparams['sample_rate']
s = s[(f>hparams['fmin']) & (f<hparams['fmax']), :]
f = f[(f>hparams['fmin']) & (f<hparams['fmax'])]
p = s.sum(axis=0)
return p, f, s
def get_on_segments(x, thresh=0, min_segment=20, pk_thresh=0, mean_rms_thresh=0):
on = np.where(x > thresh)[0]
on_segments = [list(group) for group in mit.consecutive_groups(on)]
logger.debug('On segments {}'.format(len(on_segments)))
if len(on_segments) > 0:
hi_segments = np.vstack([np.array([o[0], o[-1]]) for o in on_segments \
if ((np.max(x[o]) > pk_thresh) and \
(st.rms(x[o]) > mean_rms_thresh))])
else:
hi_segments = np.array([])
if len(hi_segments) > 0:
long_enough_segments = hi_segments[(np.diff(hi_segments) >= min_segment).flatten(), :]
else:
long_enough_segments = np.array([])
logger.debug('good segments shape {}'.format(long_enough_segments.shape))
return long_enough_segments
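# Illustrative sketch (not part of the original module): how
# more_itertools.consecutive_groups turns the indices of supra-threshold
# samples into contiguous "on" segments, which get_on_segments above then
# filters by length, peak value and mean RMS. The numbers below are made up.
def _sketch_consecutive_groups():
    import more_itertools as mit
    import numpy as np

    power = np.array([0.0, 0.1, 0.9, 1.2, 1.1, 0.2, 0.0, 1.5, 1.4, 0.1])
    on = np.where(power > 0.5)[0]  # [2, 3, 4, 7, 8]
    segments = [list(group) for group in mit.consecutive_groups(on)]
    # segments == [[2, 3, 4], [7, 8]] -> (start, end) pairs [[2, 4], [7, 8]]
    return np.vstack([np.array([seg[0], seg[-1]]) for seg in segments])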
def merge_near_segments(on_segs, min_silence=200):
    # merge all segments separated by less than min_silence
# need to have at least two bouts
if on_segs.shape[0] < 2:
logger.debug('Less than two zero segments, nothing to possibly merge')
long_segments = on_segs
else:
of_on = on_segs.flatten()[1:]
silence = np.diff(of_on)[::2]
long_silence = np.where(silence > min_silence)[0]
if(long_silence.size == 0):
logger.debug('No long silences found, all is one big bout')
of_keep = np.append((on_segs[long_silence, 1]), on_segs[-1, 1])
on_keep = np.append(on_segs[0,0], on_segs[long_silence + 1, 0])
long_segments = np.vstack([on_keep, of_keep]).T
return long_segments
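# Illustrative sketch (not part of the original module): the flatten-and-diff
# trick used by merge_near_segments above. Flattening the (start, end) pairs
# and dropping the first element lines up each segment's end with the next
# segment's start, so every second difference is a silence gap; only gaps
# longer than min_silence keep their boundaries, everything else is merged.
# The numbers below are made up (min_silence = 5 steps).
def _sketch_merge_near_segments():
    import numpy as np

    on_segs = np.array([[2, 4], [7, 8], [20, 25]])
    min_silence = 5
    of_on = on_segs.flatten()[1:]  # [4, 7, 8, 20, 25]
    silence = np.diff(of_on)[::2]  # gaps between segments: [3, 12]
    long_silence = np.where(silence > min_silence)[0]  # only the 12-step gap
    of_keep = np.append(on_segs[long_silence, 1], on_segs[-1, 1])  # [8, 25]
    on_keep = np.append(on_segs[0, 0], on_segs[long_silence + 1, 0])  # [2, 20]
    return np.vstack([on_keep, of_keep]).T  # [[2, 8], [20, 25]]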
def get_the_bouts(x, spec_par_rosa, loaded_p=None):
#
if loaded_p is not None:
p = loaded_p
logger.debug('loaded p with shape {}'.format(loaded_p.shape))
else:
logger.debug('Computing power')
p, _, _ = gimmepower(x, spec_par_rosa)
logger.debug('Finding on segments')
threshold = spec_par_rosa['thresh_rms'] * st.rms(p)
pk_threshold = spec_par_rosa['peak_thresh_rms'] * st.rms(p)
mean_rms_threshold = spec_par_rosa['mean_syl_rms_thresh'] * st.rms(p)
step_ms = spec_par_rosa['frame_shift_ms']
min_syl = spec_par_rosa['min_segment'] // step_ms
min_silence = spec_par_rosa['min_silence'] // step_ms
min_bout = spec_par_rosa['min_bout'] // step_ms
max_bout = spec_par_rosa['max_bout'] // step_ms
syllables = get_on_segments(p, threshold, min_syl, pk_threshold, mean_rms_threshold)
logger.debug('Found {} syllables'.format(syllables.shape[0]))
logger.debug('Merging segments with silent interval smaller than {} steps'.format(min_silence))
bouts = merge_near_segments(syllables, min_silence=min_silence)
logger.debug('Found {} bout candidates'.format(bouts.shape[0]))
if bouts.shape[0] > 0:
long_enough_bouts = bouts[((np.diff(bouts) >= min_bout) & (np.diff(bouts) < max_bout)).flatten(), :]
logger.debug('Removed shorter/longer than [{} ;{}], {} candidates left'.format(min_bout, max_bout,
long_enough_bouts.shape[0]))
else:
long_enough_bouts = bouts
power_values = [p[x[0]:x[1]] for x in long_enough_bouts]
return long_enough_bouts, power_values, p, syllables
def get_bouts_in_file(file_path, hparams, loaded_p=None):
# path of the wav_file
# h_params from the rosa spectrogram plus the parameters:
    # 'read_wav_fun': load_couple, # function for loading the wav_like_stream (has to return fs, ndarray)
    # 'min_segment': 30, # Minimum length of supra_threshold to consider a 'syllable'
    # 'min_silence': 200, # Minimum distance between groups of syllables to consider separate bouts
    # 'bout_lim': 200, # same as min_distance !!! Clean that out!
# 'min_bout': 250, # min bout duration
# 'peak_thresh_rms': 2.5, # threshold (rms) for peak acceptance,
# 'thresh_rms': 1 # threshold for detection of syllables
# Decide and see if it CAN load the power
logger.info('Getting bouts for file {}'.format(file_path))
    print('getting bouts for file {}'.format(file_path))
sys.stdout.flush()
#logger.debug('s_f {}'.format(s_f))
    # Get the bouts. If loaded_p is None, it will compute it
try:
s_f, wav_i = hparams['read_wav_fun'](file_path)
hparams['sample_rate'] = s_f
the_bouts, the_p, all_p, all_syl = get_the_bouts(wav_i, hparams, loaded_p=loaded_p)
except Exception as e:
warnings.warn('Error getting bouts for file {}'.format(file_path))
logger.info('error {}'.format(e))
print('error in {}'.format(file_path))
sys.stdout.flush()
# return empty DataFrame
the_bouts = np.empty(0)
wav_i = np.empty(0)
all_p = np.empty(0)
if the_bouts.size > 0:
step_ms = hparams['frame_shift_ms']
pk_dist = hparams['min_segment']
bout_pd = pd.DataFrame(the_bouts * step_ms, columns=['start_ms', 'end_ms'])
bout_pd['start_sample'] = bout_pd['start_ms'] * (s_f//1000)
bout_pd['end_sample'] = bout_pd['end_ms'] * (s_f//1000)
bout_pd['p_step'] = the_p
# the extrema over the file
bout_pd['rms_p'] = st.rms(all_p)
bout_pd['peak_p'] = bout_pd['p_step'].apply(np.max)
# check whether the peak power is larger than hparams['peak_thresh_rms'] times the rms through the file
bout_pd['bout_check'] = bout_pd.apply(lambda row: \
(row['peak_p'] > hparams['peak_thresh_rms'] * row['rms_p']),
axis=1)
bout_pd['file'] = file_path
bout_pd['len_ms'] = bout_pd.apply(lambda r: r['end_ms'] - r['start_ms'], axis=1)
syl_pd = pd.DataFrame(all_syl * step_ms, columns=['start_ms', 'end_ms'])
bout_pd['syl_in'] = bout_pd.apply(lambda r: \
syl_pd[(syl_pd['start_ms'] >= r['start_ms']) & \
(syl_pd['start_ms'] <= r['end_ms'])].values,
axis=1)
bout_pd['n_syl'] = bout_pd['syl_in'].apply(len)
# get all the peaks larger than the threshold(peak_thresh_rms * rms)
bout_pd['peaks_p'] = bout_pd.apply(lambda r: peakutils.indexes(r['p_step'],
thres=hparams['peak_thresh_rms']*r['rms_p']/r['p_step'].max(),
min_dist=pk_dist//step_ms),
axis=1)
bout_pd['n_peaks'] = bout_pd['peaks_p'].apply(len)
bout_pd['l_p_ratio'] = bout_pd.apply(lambda r: np.nan if r['n_peaks']==0 else r['len_ms'] / (r['n_peaks']), axis=1)
try:
delta = int(hparams['waveform_edges'] * hparams['sample_rate'] * 0.001)
except KeyError:
delta = 0
bout_pd['waveform'] = bout_pd.apply(lambda df: wav_i[df['start_sample'] - delta: df['end_sample'] + delta], axis=1)
else:
bout_pd = pd.DataFrame()
return bout_pd, wav_i, all_p
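# Illustrative sketch (not part of the original module): the row-wise pattern
# used above to derive bout columns with DataFrame.apply(axis=1) -- shown here
# for just the length column and the bout_check rule (peak power must exceed
# peak_thresh_rms times the file RMS). The numbers below are made up.
def _sketch_row_wise_bout_columns():
    import pandas as pd

    bout_pd = pd.DataFrame({'start_ms': [0, 500], 'end_ms': [300, 1200],
                            'peak_p': [5.0, 40.0], 'rms_p': [10.0, 10.0]})
    peak_thresh_rms = 2.5
    bout_pd['len_ms'] = bout_pd.apply(lambda r: r['end_ms'] - r['start_ms'], axis=1)
    bout_pd['bout_check'] = bout_pd.apply(
        lambda r: r['peak_p'] > peak_thresh_rms * r['rms_p'], axis=1)
    return bout_pd  # len_ms [300, 700], bout_check [False, True]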
def get_bouts_in_long_file(file_path, hparams, loaded_p=None, chunk_size=90000000):
# path of the wav_file
# h_params from the rosa spectrogram plus the parameters:
    # 'read_wav_fun': load_couple, # function for loading the wav_like_stream (has to return fs, ndarray)
    # 'min_segment': 30, # Minimum length of supra_threshold to consider a 'syllable'
    # 'min_silence': 200, # Minimum distance between groups of syllables to consider separate bouts
    # 'bout_lim': 200, # same as min_distance !!! Clean that out!
# 'min_bout': 250, # min bout duration
# 'peak_thresh_rms': 2.5, # threshold (rms) for peak acceptance,
# 'thresh_rms': 1 # threshold for detection of syllables
# Decide and see if it CAN load the power
logger.info('Getting bouts for long file {}'.format(file_path))
    print('getting bouts for long file {}'.format(file_path))
sys.stdout.flush()
#logger.debug('s_f {}'.format(s_f))
    # Get the bouts. If loaded_p is None, it will compute it
s_f, wav_i = hparams['read_wav_fun'](file_path)
hparams['sample_rate'] = s_f
n_chunks = int(np.ceil(wav_i.shape[0]/chunk_size))
wav_chunks = np.array_split(wav_i, n_chunks)
logger.info('splitting file into {} chunks'.format(n_chunks))
chunk_start_sample = 0
bouts_pd_list = []
for i_chunk, wav_chunk in tqdm(enumerate(wav_chunks), total=n_chunks):
# get the bouts for a chunk
# offset the starts to the beginning of the chunk
# recompute the beginning of the next chunk
the_bouts, the_p, all_p, all_syl = get_the_bouts(wav_chunk, hparams, loaded_p=loaded_p)
chunk_offset_ms = int(1000 * chunk_start_sample / s_f)
# make one bouts pd dataframe for the chunk
if the_bouts.size > 0:
step_ms = hparams['frame_shift_ms']
pk_dist = hparams['min_segment']
bout_pd = pd.DataFrame(the_bouts * step_ms + chunk_offset_ms, columns=['start_ms', 'end_ms'])
bout_pd['start_sample'] = bout_pd['start_ms'] * (s_f//1000)
bout_pd['end_sample'] = bout_pd['end_ms'] * (s_f//1000)
bout_pd['p_step'] = the_p
# the extrema over the file
bout_pd['rms_p'] = st.rms(all_p)
bout_pd['peak_p'] = bout_pd['p_step'].apply(np.max)
# check whether the peak power is larger than hparams['peak_thresh_rms'] times the rms through the file
bout_pd['bout_check'] = bout_pd.apply(lambda row: \
(row['peak_p'] > hparams['peak_thresh_rms'] * row['rms_p']),
axis=1)
bout_pd['file'] = file_path
bout_pd['len_ms'] = bout_pd.apply(lambda r: r['end_ms'] - r['start_ms'], axis=1)
            syl_pd = pd.DataFrame(all_syl * step_ms + chunk_offset_ms, columns=['start_ms', 'end_ms'])
bout_pd['syl_in'] = bout_pd.apply(lambda r: \
syl_pd[(syl_pd['start_ms'] >= r['start_ms']) & \
(syl_pd['start_ms'] <= r['end_ms'])].values,
axis=1)
bout_pd['n_syl'] = bout_pd['syl_in'].apply(len)
# get all the peaks larger than the threshold(peak_thresh_rms * rms)
bout_pd['peaks_p'] = bout_pd.apply(lambda r: peakutils.indexes(r['p_step'],
thres=hparams['peak_thresh_rms']*r['rms_p']/r['p_step'].max(),
min_dist=pk_dist//step_ms),
axis=1)
bout_pd['n_peaks'] = bout_pd['peaks_p'].apply(len)
bout_pd['l_p_ratio'] = bout_pd.apply(lambda r: np.nan if r['n_peaks']==0 else r['len_ms'] / (r['n_peaks']), axis=1)
# ### refer the starts, ends to the beginning of the chunk
# delta_l = -1*delta - chunk_start_sample
# delta_r = delta - cunk_start_sample
# bout_pd['waveform'] = bout_pd.apply(lambda df: wav_chunk[df['start_sample'] + delta_l: df['end_sample'] + delta_r], axis=1)
else:
bout_pd = | pd.DataFrame() | pandas.DataFrame |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring datetimelike-looking data when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
# invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce non-ns datetime64 to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.7
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import Scripts
# %reload_ext autoreload
# %autoreload 2
import sys
from pathlib import Path
import os
source_path = str(Path(os.path.abspath('joaoc-evaluation@%')).parent.parent / 'src')
if source_path not in sys.path:
sys.path.insert(0, source_path)
# Packages
import pandas as pd
pd.options.display.max_columns = 999
import random
import warnings
warnings.filterwarnings('ignore')
# -
from model_selection import plotting, select_model
# +
from sklearn import preprocessing
import pandas as pd
from collections import defaultdict
import copy
import yaml
from pipeline.data_persistence import persist_local, get_local
# -
evaluation = """
evaluation:
metrics: [recall, precision, f1, missed, size]
parameters:
at%: [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]
lower_bound: 0
upper_bound: 300
groups:
CD: director1
CO: director2
LPN: director3
LPI: director3
LC: director3
CE: director3
"""
evaluation = yaml.safe_load(evaluation)
experiment_id = 7136875758
features = get_local({'experiment_id': experiment_id}, 'features').set_index('id_llamado')
labels = get_local({'experiment_id': experiment_id}, 'labels').set_index('id_llamado')
length = 10000
labels = labels.sort_values(by='reception_date', ascending=False)
predictions = labels[:length][[]]
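# synthetic scores in [0, 1) so the evaluation functions below can be exercised without a trained model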
predictions['prediction'] = [random.random() for i in range(length)]
observertions = labels[:length]
# +
def classifing(df, args):
res = []
for i in args['at%']:
size = min(args['upper_bound'], max(args['lower_bound'], int(i / 100 * len(df))))
# store (perc, missed complaints, size)
res.append((i, df[size:].sum(), size,))
return res
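# e.g. with at% = [50], lower_bound = 0, upper_bound = 300 and a 40-row group already
# sorted by prediction, size = 20 and the appended entry is
# (50, <complaints ranked below the top 20>, 20)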
def adding_up(df):
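# element-wise sum, across days, of the (%, missed, size) tuples produced by classifing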
res = pd.DataFrame()
for i, a in df.iterrows():
new = pd.DataFrame(a['target'], columns=['%', 'missed', 'size']).set_index('%')
if len(res) == 0:
res = new
else:
res['missed'] = new['missed'] + res['missed']
res['size'] = new['size'] + res['size']
return res
def generate_name(row, metric):
return '{}@{}@{}'.format(metric, row["%"], row["groups"])
def apply_metric(df, metric):
metrics_func = dict(
recall = lambda x: (1 - x['missed'] / x['target']),
precision = lambda x: (x['target'] - x['missed']) / x['size'],
f1 = lambda x: 1,
missed = lambda x: x['missed'],
size = lambda x: x['size']
# 2 * ((x['target'] - x['missed']) / x['size'] * (1 - x['missed'] / x['target'])) / ((x['target'] - x['missed']) / x['size'] + (1 - x['missed'] / x['target']))
)
return df.apply(lambda row: {'name': generate_name(row, metric),
'value': metrics_func[metric](row)}, 1).tolist()
def calculate_metrics(df, results, metrics):
df_temp = df.query("groups != 'overall'").groupby('%').sum()
df_temp['groups'] = 'overall_groups'
df = pd.concat([df.reset_index(), df_temp.reset_index()])
total_complaints = results.groupby('groups')['target'].sum()
soma = total_complaints.sum() / 2
total_complaints['overall_groups'] = soma
total_complaints['overall'] = soma
df = df.reset_index().merge(total_complaints.reset_index(), on='groups')
results = []
for metric in metrics:
results = results + apply_metric(df, metric)
return results
def apply_daily_evaluation(predictions, observations, evaluation):
results = predictions.merge(observations, right_index=True, left_index=True)\
.sort_values(by='prediction', ascending=False)
results['groups'] = results['tipo_procedimiento_codigo'].apply(lambda x: evaluation['groups'][x])
temp = results.copy()
temp['groups'] = 'overall'
results = pd.concat([temp, results])
df = results.groupby([results['reception_date'].dt.date, 'groups'])['target']\
.apply(classifing, args=evaluation['parameters']).to_frame()
df = df.reset_index().groupby('groups').apply(adding_up)
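# df now has one row per (group, %) with the summed missed-complaint counts and list sizes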
results = calculate_metrics(df, results, evaluation['metrics'])
return results
def get_params_for_precision(obs, pred, precision_at):
precision, recall, thresh = precision_recall_curve(obs['target'], pred['prediction'])
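# drop the final (precision=1, recall=0) point that precision_recall_curve appends,
# so the arrays stay the same length as thresh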
df = pd.DataFrame({'precision': precision[:-1], 'thresh': thresh, 'recall': recall[:-1]})
res = []
for prec in precision_at:
if len(df[df['precision'] >= prec]['recall']) > 0:
res.append({'name': f'precision@{prec}@recall',
'value': df[df['precision'] >= prec]['recall'].values[0]})
else:
res.append({'name': f'precision@{prec}@recall',
'value': 0})
res.append({'name': f'precision@{prec}@%',
'value': len(df[df['precision'] >= prec]) / len(df) * 100})
return res
def evaluate(obs, pred, evaluation):
evaluations = apply_daily_evaluation(pred, obs, evaluation)
evaluations = evaluations + get_params_for_precision(obs, pred, evaluation['parameters']['precision_at'])
return evaluations
# -
from copy import deepcopy
df = apply_daily_evaluation(predictions, observertions, evaluation['evaluation'])
df
total_complaints = results.groupby('groups')['target'].sum()
total_complaints['overall_groups'] = total_complaints.sum()
total_complaints['overall'] = total_complaints['overall_groups']
total_complaints
ax = df.query('groups == "overall"').plot(x='%', y='recall', label='overall')
ax = df.query('groups == "director1"').plot(x='%', y='recall', ax=ax, label='director1')
ax = df.query('groups == "director2"').plot(x='%', y='recall', ax=ax, label='director2')
ax = df.query('groups == "director3"').plot(x='%', y='recall', ax=ax, label='director3')
ax = pd.DataFrame({'%': [0,100], 'recall': [0,1]}).plot(x='%', y='recall', ax=ax, label='straight')
# +
def predict_given_at_1(df, args):
df['pred_target'] = df.index.isin(df.nlargest(int(args[0] / 100 * len(df)), # percentage of list
columns='prediction').index)
df['pred_target'] = df['pred_target'].apply(int)
return df
def predict_given_at_2(df, args):
df['pred_target'] = df.index.isin(df.iloc[:(int(50 / 100 * len(df)))].index)
df['pred_target'] = df['pred_target'].apply(int)
return df
def predict_given_at_pass(df, args):
return df
def predict_given_multiple(df, args):
for perc in args[0]:
df[f'pred_target_{perc}'] = df.index.isin(df.iloc[:(int(perc / 100 * len(df)))].index)
# df[f'pred_target_{perc}'] = df[f'pred_target_{perc}'].apply(int)
return df
# -
# %timeit results.groupby([results['reception_date'].dt.date, 'groups']).apply(predict_given_at_1, args=(50,))
# %timeit results.groupby([results['reception_date'].dt.date, 'groups']).apply(predict_given_at_2, args=(50,))
# %timeit results.groupby([results['reception_date'].dt.date, 'groups']).apply(predict_given_at_pass, args=(50,))
args = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# %timeit results.groupby([results['reception_date'].dt.date, 'groups']).apply(predict_given_multiple, args=(args,))
import numpy as np
def return_numbers(df):
res = []
for i in range(10):
size = int(i / 10 * len(df))
res.append((df[size:].sum(), df[:size].sum() / size,))
return res
def return_numbers_1(df):
size = int(50 / 100 * len(df))
return df[size:].sum(), df[:size].sum() / size
def return_numbers_multiple(df, args):
res = []
for i in args['%']:
size = min(args['upper_bound'], max(args['lower_bound'], int(i / 100 * len(df))))
# store {perc: (missed complaints, size)}
res.append({i: (df[size:].sum(), size)})
return res
def return_0(df):
return 0
# %timeit results.groupby([results['reception_date'].dt.date, 'groups'])['target'].apply(return_numbers_1).to_frame()
# +
args = {'%': [5, 10, 20, 30, 40, 50, 60, 70, 80, 90],
'lower_bound': 1,
'upper_bound': 30}
# %timeit results.groupby([results['reception_date'].dt.date, 'groups'])['target'].apply(return_numbers_multiple, args=args).to_frame()
# -
results.groupby([results['reception_date'].dt.date, 'groups'])['target'].apply(return_numbers_multiple, args={'%': args,
'lower_bound': 1,
'upper_bound': 30}).to_frame()
results.groupby([results['reception_date'].dt.date, 'groups'])['target'].apply(return_numbers_multiple, args=(args,)).to_frame()
# %timeit results.groupby([results['reception_date'].dt.date, 'groups'])['target'].apply(return_numbers).to_frame()
# %timeit results.groupby([results['reception_date'].dt.date, 'groups'])['target'].apply(return_0).to_frame()
results.groupby([results['reception_date'].dt.date, 'groups'])['target'].apply(return_0).to_frame()
time = 0.295
4.3 * 8 * 11 * 40 / 60 / 60
def recall(results):
total_complaints = (results['target'] == 1).sum()
if total_complaints:
recall = sum((results['pred_target'] == 1) & (results['target'] == 1)) / total_complaints
else:
recall = 1
return recall
def precision(results):
return sum((results['pred_target'] == 1) & (results['target'] == 1)) / sum(results['pred_target'] == 1)
from sklearn.metrics import precision_recall_curve
import random
import numpy as np
y_true = pd.Series([random.choice([0,1]) for i in range(1000)])
probas_pred = pd.Series([random.uniform(0, 1) for i in range(1000)])
def get_params_for_precision(obs, pred, precision_at):
precision, recall, thresh = precision_recall_curve(obs['target'], pred['prediction'])
df = pd.DataFrame({'precision': precision[1:], 'thresh': thresh, 'recall': recall[1:]})
res = []
for prec in precision_at:
res.append({'metric': f'precision@{prec}@recall',
'value': df[df['precision'] >= prec]['recall'].values[0]})
res.append({'metric': f'precision@{prec}@%',
'value': len(df[df['precision'] >= prec]) / len(df) * 100})
return res
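# Illustrative call on the synthetic series above (wrapped in DataFrames to match
# the 'target' / 'prediction' columns the function expects; precision_at values are arbitrary):
# get_params_for_precision(pd.DataFrame({'target': y_true}),
#                          pd.DataFrame({'prediction': probas_pred}),
#                          precision_at=[0.5, 0.75])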
'asdf'['a']
ax = pd.DataFrame({'x':thresh, 'y':precision[1:]})
#dependencies
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import confusion_matrix
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import normalize
import itertools
import matplotlib.pyplot as plt
import pandas as pd
#function defination to plot the confusion matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
train_data = pd.read_csv('criminal_train.csv')
y_train = train_data['Criminal']
X_train = train_data.drop(['PERID','Criminal'],axis = 1).values
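# with axis=0, normalize rescales each feature column to unit L2 norm (the default axis=1 would rescale each sample row)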
X_train = normalize(X_train, axis = 0)
test_data = pd.read_csv('criminal_test.csv')
X_test = test_data.drop(['PERID'],axis = 1).values
X_test = normalize(X_test, axis = 0)
# model structure: soft-voting ensemble of gradient boosting and random forest, wrapped in AdaBoost below
model = VotingClassifier(
estimators=[
( 'gb',GradientBoostingClassifier(n_estimators=500,verbose =1,max_depth = 6 )),
('rf', RandomForestClassifier(n_estimators=1000, verbose = 1))],
voting='soft')
model = AdaBoostClassifier(base_estimator= model, n_estimators =10 )
#training the model
print('training the model: ')
model.fit(X_train, y_train)
print('model trained: ')
model.score(X_train,y_train)
X_train, X_test, y_train, y_test = train_test_split(X_train ,y_train, train_size = .9)
model.fit(X_train, y_train)
model.score(X_test, y_test)
#####################################################
#predicting values on test file
df = pd.read_csv('criminal_test.csv')
# X_test was overwritten by train_test_split above, so rebuild it from the test file
X_test = normalize(df.drop(['PERID'], axis=1).values, axis=0)
predicted = model.predict(X_test)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('output_old.csv', index = False)
###############################################################
print(model.score(X_train, y_train))
# predicted value
predicted_y = model.predict(X_train)
#plot the confusion matrix
cnf_matrix = confusion_matrix(y_train, predicted_y)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=[0,1],
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=[0,1], normalize=True,
title='Normalized confusion matrix')
plt.show()
################################################3
#input file
df = pd.read_csv('criminal_train.csv')
# KNN classifier
model_knn =KNeighborsClassifier(n_neighbors= 5, weights='distance', n_jobs = 4)
model_knn.fit(X_train, y_train)
model_knn.score(X_train,y_train)
predicted = model_knn.predict(X_train)
df = pd.read_csv('criminal_train.csv')
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_knn.csv', index = False)
## random forest classifier
model_rf =RandomForestClassifier(n_estimators=1000, verbose = 1)
model_rf.fit(X_train, y_train)
df = pd.read_csv('criminal_train.csv')
predicted = model_rf.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_rf.csv', index = False)
# ada boosting clssifier
model_ab = AdaBoostClassifier(n_estimators=500)
model_ab.fit(X_train, y_train)
df = pd.read_csv('criminal_train.csv')
X_test = df.drop(['PERID'],axis =1).values
predicted = model_ab.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_ab.csv', index = False)
### gradient boosting classifier
model_gb = GradientBoostingClassifier(n_estimators=500,verbose =1,max_depth = 6 )
model_gb.fit(X_train, y_train)
df = pd.read_csv('criminal_train.csv')
X_test = df.drop(['PERID'],axis =1).values
predicted = model_gb.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_gb.csv', index = False)
#logistic regression
model_lr =LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=.3)
model_lr.fit(X_train, y_train)
predicted = model_lr.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_lr.csv', index = False)
## support vector machines
model_svm =svm.SVC(C=.75, verbose = True)
model_svm.fit(X_train, y_train)
predicted = model_svm.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_svm.csv', index = False)
##############################################
### output file
test_data = pd.read_csv('criminal_test.csv')
X_test = test_data.drop(['PERID'],axis = 1).values
X_test = normalize(X_test, axis = 0)
# KNN classifier
predicted = model_knn.predict(X_test)
df = pd.read_csv('criminal_test.csv')
# TODO move away from this test generator style since we need to manage the generator file,
# which is no longer in this project workspace, as well as the output test file.
##############################################################
#                                                            #
#  THIS TEST WAS AUTOGENERATED BY groupby_test_generator.py  #
#                                                            #
##############################################################
# TODO refactor this into table-driven tests using pytest.mark.parametrize since each test body follows the same structure
# and a single test body with multiple test table entries will be more readable and flexible (see the commented sketch below).
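# A minimal sketch of that parametrized refactor (illustrative only: the pytest import,
# the test name and the parameter tuples are assumptions, not part of this autogenerated module):
#
# import pytest
#
# @pytest.mark.parametrize(
#     "val_cols, key_cols, symb_ratio, aggs",
#     [
#         (1, 1, 0.1, ["median"]),
#         (4, 1, 0.1, ["median"]),
#         (7, 3, 0.1, ["median", "min"]),
#     ],
# )
# def test_multikey_agg(val_cols, key_cols, symb_ratio, aggs):
#     test_class = groupby_everything(val_cols, key_cols, symb_ratio, aggs)
#     pd_out = (
#         pd.DataFrame(test_class.data)
#         .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
#         .agg(test_class.aggregation_functions)
#     )
#     sf_out = (
#         rt.Dataset(test_class.data)
#         .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
#         .agg(test_class.aggregation_functions)
#     )
#     for func in aggs:
#         for i in range(test_class.val_columns):
#             column = VAL_COLUMN_NAMES[i]
#             for a, b in zip(pd_out[column][func], sf_out[func.title()][column]):
#                 if a == a and b == b:  # skip NaN pairs, as safe_assert does below
#                     assert a == pytest.approx(b)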
from .groupby_unit_test_parameters import *
import pandas as pd
import riptable as rt
import unittest
class autogenerated_gb_tests(unittest.TestCase):
def safe_assert(self, ary1, ary2):
for a, b in zip(ary1, ary2):
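# a == a is False only when a is NaN, so pairs containing NaN are skipped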
if a == a and b == b:
self.assertAlmostEqual(a, b, places=7)
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(1, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(4, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(7, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(2, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(5, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(1, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(4, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(7, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(2, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(5, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(1, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(4, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(7, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(2, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(5, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(1, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(4, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(7, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(2, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(5, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(1, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(4, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(7, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(2, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(5, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(1, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(4, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(7, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(2, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(5, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 3, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 3, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(1, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(4, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(7, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(2, 2, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(5, 2, 0.1, ['sum'])
pd_out = (
            pd.DataFrame(test_class.data)
            .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
            .agg(test_class.aggregation_functions)
        )
#%%
# ANCHOR IMPORTS
import sys
import pandas as pd, numpy as np
import pickle
import re
from sklearn import feature_extraction , feature_selection
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import Normalizer
from tqdm.autonotebook import trange, tqdm
import swifter
# Libraries for feature engineering.
import string
from collections import Counter # not necessary?
#from nnsplit import NNSplit
import spacy# .tokenizer.tokenize
from spellchecker import SpellChecker
# Other neat features.
from nltk.metrics.distance import edit_distance
from lexicalrichness import LexicalRichness
import syllables
import itertools
import textstat
# Stats
from scipy.stats import chisquare
#from statistics import mean
#%% Get spacy docs and save them to data to speed up development.
def get_docs(data, text_col='text_clean'):
nlp = spacy.load('en_core_web_sm')
nlp.enable_pipe("senter")
    data['docs'] = data[text_col].apply(lambda x: nlp(x))
#%%
def listify(series, feature_name: str = 'feature'):
    """Convert a pandas Series into a list of {feature_name: value} dicts (e.g. for DictVectorizer)."""
    return [{feature_name: value} for _, value in series.items()]
#%%
# Extract Baseline feature
# Character trigrams (morphological/lexical/semantic?).
def ngrams(train, test, params):
"""Extract character ngrams.
Args:
train (list): list of texts to fit the vectorizer.
test (list): list of texts to transform to feature space.
params (dict): parameters for the vectorizer construction
    Returns:
        tuple: (fitted CountVectorizer, train feature matrix, test feature matrix)
"""
vectorizer = CountVectorizer(lowercase=params['ngrams']['lowercase'],
ngram_range=params['ngrams']['size'], # experiment with ranges, e.g. ngram_range=(3,3)
analyzer=params['ngrams']['type'], #, also try "char_wb"
max_features=params['ngrams']['max_vocab']) # max_features=10000
    # Fit the count vectorizer to the preprocessed tweets.
#vectorizer.fit(train)
# Transform into input vectors for train and test data.
train_vectors = vectorizer.fit_transform(train) # using fit_transform due to better implementation.
#train_vectors = vectorizer.transform(train) #.toarray()
test_vectors = vectorizer.transform(test) #.toarray()
# Inspect with vectorizer.get_feature_names() and .toarray()
#inverse = vectorizer.inverse_transform(train)
#feature_names = vectorizer.get_feature_names()
#print(f'Train ({type(train_vectors)}) feature matrix has shape: {train_vectors.shape}')
#print(f'Test ({type(test_vectors)}) feature matrix has shape: {test_vectors.shape}')
#return vectorizer
return vectorizer, train_vectors , test_vectors
#return inverse
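#%%
# Minimal usage sketch for ngrams() (illustrative only): the params dict and the toy
# texts below are hypothetical placeholders, not the project's real configuration or data.
example_params = {
    'ngrams': {
        'lowercase': True,
        'size': (3, 3),
        'type': 'char_wb',
        'max_vocab': 10000,
    }
}
example_train = ['this is a toy training text', 'another short example text']
example_test = ['an unseen toy test text']
example_vec, example_train_X, example_test_X = ngrams(example_train, example_test, example_params)
#print(example_train_X.shape, example_test_X.shape)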
#%% ANCHOR EXTRACT LIWC
def parse_liwc(file, **args):
"""Parse a (left) aligned version of the LIWC lexicon.
Args:
        file (str): filepath to lexicon (excel).
Returns:
DataFrame: df or dict
"""
df = pd.read_excel(file, skiprows=2)
# Handling merged columns in file
### Adapted from https://stackoverflow.com/a/64179518 ###
df.columns = df.columns.to_series()\
.replace('Unnamed:\s\d+', np.nan, regex=True).ffill().values
# Multindex to represent multiple columns for some categories.
df.columns = pd.MultiIndex.from_tuples([(x, y)for x, y in
zip(df.columns, df.columns.to_series().groupby(level=0).cumcount())])
### Accessed 26-04-2021 ###
# d = data.to_dict(orient='list')
### Adapted from https://stackoverflow.com/a/50082926
# dm = data.melt()
# data = dm.set_index(['variable', dm.groupby('variable').cumcount()]).sort_index()['value'].unstack(0)
### Accessed 26-04-2021 ###
# Concat the terms by column.
# d = dict()
#d = {column: value for key, value in dd.items()}
# for ki, wl in dd.items():
# nl = []
# k, i = ki
# # for w in wl:
# # if w not in nl:
# # d[k].append(wl)
# if k in d:
# d[k].append(wl)
# else:
# d[k] = wl
### Solution from https://stackoverflow.com/a/48298420 ###
    # TODO experiment with not sorting the index, or re-sorting columns to match the multiindex or just the original df.columns.
df = df.stack().sort_index(level=1).reset_index(drop=True)
### Accessed 26-04-2021 ###
# Check that merged columns have the right number of terms.
# sum(isinstance(x, str) for x in terms['Funct'])
return df.to_dict(orient='list')
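#%%
# Illustrative call to parse_liwc() (a sketch): the spreadsheet path below is an assumed
# placeholder; the real LIWC 2007 file location depends on the local setup.
#liwc_categories = parse_liwc('LIWC2007_left_aligned.xlsx')
#print(len(liwc_categories), 'categories parsed')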
#%%
# Extract LIWC matches (lexical/semantic)
def liwc_match(parsed, d, extract=False, text_col='text_clean'):
"""Search a corpus for matches against LIWC (2007) categories.
Args:
parsed (DataFrame): a pandas df with the all categories of LIWC prepared.
d (str): a filepath to a pickle file with a corpus to search.
extract (bool, optional): Switch specifying whether or not to return a Dict for feature extraction or feature inspection/analysis. Defaults to False.
Returns:
dict: a dict with {liwc_cat1...n : count} for each datapoint in the corpus OR a dict a, a dataFrame and a Series with results of searching the categories against the matches (absolute counts per datapoint (as dict and DF) totals per category (Series)).
"""
# load data to search.
# Could do Series.count(regex) or df[clean_text] -> (joined) list?
if isinstance(d, pd.DataFrame) == False: # the ... analysis case.
        data = pd.read_pickle(d)
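#%%
# Sketch of how liwc_match() is intended to be called, per its docstring; both the lexicon
# path and the pickled-corpus path are assumed placeholders, not files from this project.
#parsed = parse_liwc('LIWC2007_left_aligned.xlsx')
#counts = liwc_match(parsed, 'data/corpus.pkl', extract=True)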
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
        mask2 = pd.Series([True, True, True, True, True], index=mask.index)
import numpy as np
import pandas as pd
import datetime
from TransactionEngine import dictionaries
def generate_transactions(date, size=10000):
    df = pd.DataFrame()
""" Panel4D: a 4-d dict like collection of panels """
import warnings
from pandas.core.generic import NDFrame
from pandas.core.panelnd import create_nd_panel_factory
from pandas.core.panel import Panel
from pandas.util._validators import validate_axis_style_args
Panel4D = create_nd_panel_factory(klass_name='Panel4D',
orders=['labels', 'items', 'major_axis',
'minor_axis'],
slices={'labels': 'labels',
'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis',
'minor': 'minor_axis'}, stat_axis=2,
ns=dict(__doc__="""
Panel4D is a 4-Dimensional named container very much like a Panel, but
having 4 named dimensions. It is intended as a test bed for more
N-Dimensional named containers.
.. deprecated:: 0.19.0
The recommended way to represent these types of n-dimensional data
are with the `xarray package <http://xarray.pydata.org/en/stable/>`__.
Pandas provides a `.to_xarray()` method to automate this conversion.
Parameters
----------
data : ndarray (labels x items x major x minor), or dict of Panels
labels : Index or array-like : axis=0
items : Index or array-like : axis=1
major_axis : Index or array-like: axis=2
minor_axis : Index or array-like: axis=3
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""))
def panel4d_init(self, data=None, labels=None, items=None, major_axis=None,
minor_axis=None, copy=False, dtype=None):
# deprecation GH13564
warnings.warn("\nPanel4D is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of n-dimensional data are with\n"
"the `xarray package "
"<http://xarray.pydata.org/en/stable/>`__.\n"
"Pandas provides a `.to_xarray()` method to help "
"automate this conversion.\n",
FutureWarning, stacklevel=2)
self._init_data(data=data, labels=labels, items=items,
major_axis=major_axis, minor_axis=minor_axis, copy=copy,
dtype=dtype)
def panel4d_reindex(self, labs=None, labels=None, items=None, major_axis=None,
minor_axis=None, axis=None, **kwargs):
# Hack for reindex_axis deprecation
# Ha, we used labels for two different things
# I think this will work still.
if labs is None:
args = ()
else:
args = (labs,)
kwargs_ = dict(labels=labels,
items=items,
major_axis=major_axis,
minor_axis=minor_axis,
axis=axis)
kwargs_ = {k: v for k, v in kwargs_.items() if v is not None}
# major = kwargs.pop("major", None)
# minor = kwargs.pop('minor', None)
# if major is not None:
# if kwargs.get("major_axis"):
# raise TypeError("Cannot specify both 'major' and 'major_axis'")
# kwargs_['major_axis'] = major
# if minor is not None:
# if kwargs.get("minor_axis"):
# raise TypeError("Cannot specify both 'minor' and 'minor_axis'")
# kwargs_['minor_axis'] = minor
if axis is not None:
kwargs_['axis'] = axis
    axes = validate_axis_style_args(self, args, kwargs_, 'labs', 'reindex')
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet, Relationship
from featuretools.utils.cudf_utils import pd_to_cudf_clean
from featuretools.utils.gen_utils import import_or_none
cudf = import_or_none('cudf')
# TODO: Fix vjawa
@pytest.mark.skipif('not cudf')
def test_create_entity_from_cudf_df(pd_es):
cleaned_df = pd_to_cudf_clean(pd_es["log"].df)
log_cudf = cudf.from_pandas(cleaned_df)
print(pd_es["log"].variable_types)
cudf_es = EntitySet(id="cudf_es")
cudf_es = cudf_es.entity_from_dataframe(
entity_id="log_cudf",
dataframe=log_cudf,
index="id",
time_index="datetime",
variable_types=pd_es["log"].variable_types
)
pd.testing.assert_frame_equal(cleaned_df, cudf_es["log_cudf"].df.to_pandas(), check_like=True)
@pytest.mark.skipif('not cudf')
def test_create_entity_with_non_numeric_index(pd_es, cudf_es):
df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"],
"values": [1, 12, -34, 27]})
cudf_df = cudf.from_pandas(df)
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
cudf_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=cudf_df,
index="id",
variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric})
pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), cudf_es['new_entity'].df.to_pandas())
@pytest.mark.skipif('not cudf')
def test_create_entityset_with_mixed_dataframe_types(pd_es, cudf_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
cudf_df = cudf.from_pandas(df)
    # Test error is raised when trying to add a cudf entity to an entityset with existing pandas entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(cudf_df), type(pd_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=cudf_df,
index="id")
    # Test error is raised when trying to add a pandas entity to an entityset with existing cudf entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(df), type(cudf_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
cudf_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
@pytest.mark.skipif('not cudf')
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
cudf_es = EntitySet(id="cudf_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [ | pd.to_datetime('2019-01-10') | pandas.to_datetime |
import os
import random
import shutil
import numpy as np
import pandas as pd
import pytest
from PIL import Image
from keras_preprocessing.image import dataframe_iterator
from keras_preprocessing.image import image_data_generator
@pytest.fixture(scope='module')
def all_test_images():
img_w = img_h = 20
rgb_images = []
rgba_images = []
gray_images = []
for n in range(8):
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = Image.fromarray(imarray.astype('uint8')).convert('RGB')
rgb_images.append(im)
imarray = np.random.rand(img_w, img_h, 4) * variance + bias
im = Image.fromarray(imarray.astype('uint8')).convert('RGBA')
rgba_images.append(im)
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = Image.fromarray(
imarray.astype('uint8').squeeze()).convert('L')
gray_images.append(im)
return [rgb_images, rgba_images, gray_images]
def test_dataframe_iterator(all_test_images, tmpdir):
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
filepaths = []
filenames_without = []
for test_images in all_test_images:
for im in test_images:
filename = "image-{}.png".format(count)
filename_without = "image-{}".format(count)
filenames.append(filename)
filepaths.append(os.path.join(str(tmpdir), filename))
filenames_without.append(filename_without)
im.save(str(tmpdir / filename))
count += 1
df = pd.DataFrame({
"filename": filenames,
"class": [str(random.randint(0, 1)) for _ in filenames],
"filepaths": filepaths
})
# create iterator
iterator = dataframe_iterator.DataFrameIterator(df, str(tmpdir))
batch = next(iterator)
assert len(batch) == 2
assert isinstance(batch[0], np.ndarray)
assert isinstance(batch[1], np.ndarray)
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, x_col='filepaths')
df_iterator_dir = generator.flow_from_dataframe(df, str(tmpdir))
df_sparse_iterator = generator.flow_from_dataframe(df, str(tmpdir),
class_mode="sparse")
assert not np.isnan(df_sparse_iterator.classes).any()
# check number of classes and images
assert len(df_iterator.class_indices) == num_classes
assert len(df_iterator.classes) == count
assert set(df_iterator.filenames) == set(filepaths)
assert len(df_iterator_dir.class_indices) == num_classes
assert len(df_iterator_dir.classes) == count
assert set(df_iterator_dir.filenames) == set(filenames)
# test without shuffle
_, batch_y = next(generator.flow_from_dataframe(df, str(tmpdir),
shuffle=False,
class_mode="sparse"))
assert (batch_y == df['class'].astype('float')[:len(batch_y)]).all()
# Test invalid use cases
with pytest.raises(ValueError):
generator.flow_from_dataframe(df, str(tmpdir), color_mode='cmyk')
with pytest.raises(ValueError):
generator.flow_from_dataframe(df, str(tmpdir), class_mode='output')
with pytest.warns(DeprecationWarning):
generator.flow_from_dataframe(df, str(tmpdir), has_ext=True)
with pytest.warns(DeprecationWarning):
generator.flow_from_dataframe(df, str(tmpdir), has_ext=False)
def preprocessing_function(x):
"""This will fail if not provided by a Numpy array.
Note: This is made to enforce backward compatibility.
"""
assert x.shape == (26, 26, 3)
assert type(x) is np.ndarray
return np.zeros_like(x)
# Test usage as Sequence
generator = image_data_generator.ImageDataGenerator(
preprocessing_function=preprocessing_function)
dir_seq = generator.flow_from_dataframe(df, str(tmpdir),
target_size=(26, 26),
color_mode='rgb',
batch_size=3,
class_mode='categorical')
assert len(dir_seq) == np.ceil(count / 3)
x1, y1 = dir_seq[1]
assert x1.shape == (3, 26, 26, 3)
assert y1.shape == (3, num_classes)
x1, y1 = dir_seq[5]
assert (x1 == 0).all()
with pytest.raises(ValueError):
x1, y1 = dir_seq[9]
def test_dataframe_iterator_validate_filenames(all_test_images, tmpdir):
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames + ['test.jpp', 'test.jpg']})
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df,
str(tmpdir),
class_mode="input")
assert len(df_iterator.filenames) == len(df['filename']) - 2
df_iterator = generator.flow_from_dataframe(df,
str(tmpdir),
class_mode="input",
validate_filenames=False)
assert len(df_iterator.filenames) == len(df['filename'])
def test_dataframe_iterator_sample_weights(all_test_images, tmpdir):
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames})
df['weight'] = ([2, 5] * len(df))[:len(df)]
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, str(tmpdir),
x_col="filename",
y_col=None,
shuffle=False,
batch_size=5,
weight_col='weight',
class_mode="input")
batch = next(df_iterator)
assert len(batch) == 3 # (x, y, weights)
# check if input and output have the same shape and they're the same
assert(batch[0].all() == batch[1].all())
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
assert input_img[0][0][0] != output_img[0][0][0]
assert np.array_equal(np.array([2, 5, 2, 5, 2]), batch[2])
# fail
df['weight'] = (['2', '5'] * len(df))[:len(df)]
with pytest.raises(TypeError):
image_data_generator.ImageDataGenerator().flow_from_dataframe(
df,
weight_col='weight',
class_mode="input"
)
def test_dataframe_iterator_class_mode_input(all_test_images, tmpdir):
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames})
generator = image_data_generator.ImageDataGenerator()
df_autoencoder_iterator = generator.flow_from_dataframe(df, str(tmpdir),
x_col="filename",
y_col=None,
class_mode="input")
batch = next(df_autoencoder_iterator)
# check if input and output have the same shape and they're the same
assert np.allclose(batch[0], batch[1])
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
assert(input_img[0][0][0] != output_img[0][0][0])
df_autoencoder_iterator = generator.flow_from_dataframe(df, str(tmpdir),
x_col="filename",
y_col="class",
class_mode="input")
batch = next(df_autoencoder_iterator)
# check if input and output have the same shape and they're the same
assert(batch[0].all() == batch[1].all())
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
assert(input_img[0][0][0] != output_img[0][0][0])
def test_dataframe_iterator_class_mode_categorical_multi_label(all_test_images,
tmpdir):
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
label_opt = ['a', 'b', ['a'], ['b'], ['a', 'b'], ['b', 'a']]
df = pd.DataFrame({
"filename": filenames,
"class": [random.choice(label_opt) for _ in filenames[:-2]] + ['b', 'a']
})
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, str(tmpdir))
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, np.ndarray)
assert batch_y.shape == (len(batch_x), 2)
for labels in batch_y:
assert all(label in {0, 1} for label in labels)
# on first 3 batches
df = pd.DataFrame({
"filename": filenames,
"class": [['b', 'a']] + ['b'] + [['c']] + [random.choice(label_opt)
for _ in filenames[:-3]]
})
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, str(tmpdir), shuffle=False)
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, np.ndarray)
assert batch_y.shape == (len(batch_x), 3)
for labels in batch_y:
assert all(label in {0, 1} for label in labels)
assert (batch_y[0] == np.array([1, 1, 0])).all()
assert (batch_y[1] == np.array([0, 1, 0])).all()
assert (batch_y[2] == np.array([0, 0, 1])).all()
def test_dataframe_iterator_class_mode_multi_output(all_test_images, tmpdir):
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
# fit both outputs are a single number
df = pd.DataFrame({"filename": filenames}).assign(
output_0=np.random.uniform(size=len(filenames)),
output_1=np.random.uniform(size=len(filenames))
)
df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe(
df, y_col=['output_0', 'output_1'], directory=str(tmpdir),
batch_size=3, shuffle=False, class_mode='multi_output'
)
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, list)
assert len(batch_y) == 2
assert np.array_equal(batch_y[0],
np.array(df['output_0'].tolist()[:3]))
assert np.array_equal(batch_y[1],
np.array(df['output_1'].tolist()[:3]))
# if one of the outputs is a 1D array
df['output_1'] = [np.random.uniform(size=(2, 2, 1)).flatten()
for _ in range(len(df))]
df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe(
df, y_col=['output_0', 'output_1'], directory=str(tmpdir),
batch_size=3, shuffle=False, class_mode='multi_output'
)
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, list)
assert len(batch_y) == 2
assert np.array_equal(batch_y[0],
np.array(df['output_0'].tolist()[:3]))
assert np.array_equal(batch_y[1],
np.array(df['output_1'].tolist()[:3]))
# if one of the outputs is a 2D array
df['output_1'] = [np.random.uniform(size=(2, 2, 1))
for _ in range(len(df))]
df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe(
df, y_col=['output_0', 'output_1'], directory=str(tmpdir),
batch_size=3, shuffle=False, class_mode='multi_output'
)
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, list)
assert len(batch_y) == 2
assert np.array_equal(batch_y[0],
np.array(df['output_0'].tolist()[:3]))
assert np.array_equal(batch_y[1],
np.array(df['output_1'].tolist()[:3]))
# fail if single column
with pytest.raises(TypeError):
image_data_generator.ImageDataGenerator().flow_from_dataframe(
df, y_col='output_0',
directory=str(tmpdir),
class_mode='multi_output'
)
def test_dataframe_iterator_class_mode_raw(all_test_images, tmpdir):
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(str(tmpdir / filename))
filenames.append(filename)
count += 1
# case for 1D output
df = pd.DataFrame({"filename": filenames}).assign(
output_0=np.random.uniform(size=len(filenames)),
output_1=np.random.uniform(size=len(filenames))
)
df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe(
df, y_col='output_0', directory=str(tmpdir),
batch_size=3, shuffle=False, class_mode='raw'
)
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, np.ndarray)
assert batch_y.shape == (3,)
assert np.array_equal(batch_y, df['output_0'].values[:3])
# case with a 2D output
df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe(
df, y_col=['output_0', 'output_1'], directory=str(tmpdir),
batch_size=3, shuffle=False, class_mode='raw'
)
batch_x, batch_y = next(df_iterator)
assert isinstance(batch_x, np.ndarray)
assert len(batch_x.shape) == 4
assert isinstance(batch_y, np.ndarray)
assert batch_y.shape == (3, 2)
assert np.array_equal(batch_y,
df[['output_0', 'output_1']].values[:3])
@pytest.mark.parametrize('validation_split,num_training', [
(0.25, 18),
(0.50, 12),
(0.75, 6),
])
def test_dataframe_iterator_with_validation_split(all_test_images, validation_split,
num_training, tmpdir):
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
filenames_without = []
for test_images in all_test_images:
for im in test_images:
filename = "image-{}.png".format(count)
filename_without = "image-{}".format(count)
filenames.append(filename)
filenames_without.append(filename_without)
im.save(str(tmpdir / filename))
count += 1
df = pd.DataFrame({"filename": filenames,
"class": [str(random.randint(0, 1)) for _ in filenames]})
# create iterator
generator = image_data_generator.ImageDataGenerator(
validation_split=validation_split
)
df_sparse_iterator = generator.flow_from_dataframe(df,
str(tmpdir),
class_mode="sparse")
if np.isnan(next(df_sparse_iterator)[:][1]).any():
raise ValueError('Invalid values.')
with pytest.raises(ValueError):
generator.flow_from_dataframe(
df, tmpdir, subset='foo')
train_iterator = generator.flow_from_dataframe(df, str(tmpdir),
subset='training')
assert train_iterator.samples == num_training
valid_iterator = generator.flow_from_dataframe(df, str(tmpdir),
subset='validation')
assert valid_iterator.samples == count - num_training
# check number of classes and images
assert len(train_iterator.class_indices) == num_classes
assert len(train_iterator.classes) == num_training
assert len(set(train_iterator.filenames) &
set(filenames)) == num_training
def test_dataframe_iterator_with_custom_indexed_dataframe(all_test_images, tmpdir):
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = "image-{}.png".format(count)
filenames.append(filename)
im.save(str(tmpdir / filename))
count += 1
# create dataframes
classes = np.random.randint(num_classes, size=len(filenames))
classes = [str(c) for c in classes]
df = pd.DataFrame({"filename": filenames,
"class": classes})
df2 = pd.DataFrame({"filename": filenames,
"class": classes},
index=np.arange(1, len(filenames) + 1))
df3 = pd.DataFrame({"filename": filenames,
"class": classes},
index=filenames)
# create iterators
seed = 1
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, str(tmpdir), seed=seed)
df2_iterator = generator.flow_from_dataframe(
df2, str(tmpdir), seed=seed)
df3_iterator = generator.flow_from_dataframe(
df3, str(tmpdir), seed=seed)
# Test all iterators return same pairs of arrays
for _ in range(len(filenames)):
a1, c1 = next(df_iterator)
a2, c2 = next(df2_iterator)
a3, c3 = next(df3_iterator)
assert np.array_equal(a1, a2)
assert np.array_equal(a1, a3)
assert np.array_equal(c1, c2)
assert np.array_equal(c1, c3)
def test_dataframe_iterator_n(all_test_images, tmpdir):
# save the images in the tmpdir
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = "image-{}.png".format(count)
filenames.append(filename)
im.save(str(tmpdir / filename))
count += 1
# exclude first two items
n_files = len(filenames)
input_filenames = filenames[2:]
# create dataframes
classes = np.random.randint(2, size=len(input_filenames))
classes = [str(c) for c in classes]
df = pd.DataFrame({"filename": input_filenames})
df2 = pd.DataFrame({"filename": input_filenames,
"class": classes})
# create iterators
generator = image_data_generator.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, str(tmpdir), class_mode=None)
df2_iterator = generator.flow_from_dataframe(
df2, str(tmpdir), class_mode='binary')
# Test the number of items in iterators
assert df_iterator.n == n_files - 2
assert df2_iterator.n == n_files - 2
def test_dataframe_iterator_absolute_path(all_test_images, tmpdir):
# save the images in the tmpdir
count = 0
file_paths = []
for test_images in all_test_images:
for im in test_images:
filename = "image-{:0>5}.png".format(count)
file_path = str(tmpdir / filename)
file_paths.append(file_path)
im.save(file_path)
count += 1
# prepare an image with a forbidden extension.
file_path_fbd = str(tmpdir / 'image-forbid.fbd')
shutil.copy(file_path, file_path_fbd)
# create dataframes
classes = np.random.randint(2, size=len(file_paths))
classes = [str(c) for c in classes]
df = pd.DataFrame({"filename": file_paths})
df2 = pd.DataFrame({"filename": file_paths,
"class": classes})
    df3 = pd.DataFrame({"filename": ['image-not-exist.png'] + file_paths})
# 1. Pose the question
# What kinds of people were more likely to survive the sinking of the Titanic?
# 2. Understand the data
# 2.1 Collect the data
# https://www.kaggle.com/c/titanic
# 2.2 Import the data
# Suppress warning messages
import warnings
warnings.filterwarnings('ignore')
# Import data-processing packages
import numpy as np
import pandas as pd
# Import the data
# Training dataset
train = pd.read_csv("./train.csv")
# Test dataset
test = pd.read_csv("./test.csv")
# Show all columns
pd.set_option('display.max_columns', None)
# Show all rows
pd.set_option('display.max_rows', None)
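# A quick first look at the imported data (an illustrative next step; the original
# notebook may proceed differently from here).
print('train shape:', train.shape)
print('test shape:', test.shape)
print(train.head())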
import requests
import pandas
from bs4 import BeautifulSoup
# creating a soup object with html we got from the response
url = "https://hacktoberfest.digitalocean.com/events"
response = requests.get(url)
html = response.text
soup = BeautifulSoup(html)
# creating array of datas
all_names = []
all_locations = []
all_dates = []
all_time_zones = []
all_urls = []
# iterating on all the "tr" elements.
for tr_element in soup.findAll("tr", attrs={"class": "past"}):
# for each tr element we find the proper value and add it to its proper array
name_element = tr_element.find("td", attrs={"class": "event_name"})
name = name_element.text.strip()
all_names.append(name)
location_element = tr_element.find("td", attrs={"class": "location"})
location = location_element.text.strip()
all_locations.append(location)
date_element = tr_element.find("td", attrs={"data-label": "date"})
date = date_element.text.strip()
all_dates.append(date)
time_zone_element = tr_element.find("td", attrs={"data-label": "zone"})
time_zone = time_zone_element.text.strip()
all_time_zones.append(time_zone)
url_element = tr_element.find("a", attrs={"class": "emphasis"})
url = url_element['href']
all_urls.append(url)
# setting up our Comma Seperated Values
csv_name = "events.csv"
csv_structure = {
"Name": all_names,
"Location": all_locations,
"Date": all_dates,
"Time Zone": all_time_zones,
"URL": all_urls,
}
# Creating a csv
dataFrame = pandas.DataFrame(csv_structure)
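# Persist the scraped events to disk. This final write step is an assumption based on the
# csv_name defined above; the original script may have handled output differently.
dataFrame.to_csv(csv_name, index=False)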
import os
import math
import numpy as np
import pandas as pd
import nltk
import textstat
from string import punctuation
class Text:
def __init__(self, filename:str, directory:str='CEFR_texts'): # file name must include .txt ending
self.filename = filename
self.dir = directory
# CEFR levels
self.cefr = filename[:2]
self.cefr_micro_numerical = self.cefr_conv('6 numerical')
self.cefr_macro_categorical = self.cefr_conv('3 categorical')
self.cefr_macro_numerical = self.cefr_conv('3 numerical')
# EXTRACTED
self.text = self.extract_text()
self.tokens = nltk.word_tokenize(self.text)
self.sents = nltk.sent_tokenize(self.text)
self.sents_tokens = self.extract_sents_tokens()
self.pos = self.pos_tagger()
# CLEANED AND PREPROCESSED
self.sents_tokens_cleaned = self.sents_tokens_clean()
self.tokens_cleaned = self.tokens_clean()
self.sents_tokens_lemmatized = self.lemmatize()
self.tokens_lemmatized = self.lemmatize_tokens()
self.tokens_stemmed = self.stem_tokens()
# FEATURES: LEXICAL
self.awl = self.extract_awl()
self.ttr = self.extract_ttr()
self.attro = len(self.tokens)*self.ttr # ORIGINAL ATTR
self.attr = math.log(len(self.tokens)) * self.ttr
self.abvmean = self.extract_abv('mean')
self.abvmin = self.extract_abv('min')
self.abvmax = self.extract_abv('max')
self.ajcv = self.extract_ajcv('ajcv')
self.jcpp = self.extract_ajcv('na_perc')
self.bpera = self.extract_bpera()
# FEATURES: SYNTACTIC
self.asl = self.extract_asl()
self.avps = self.extract_avps()
self.apps = self.extract_apps()
# FEATURES: READABILITY FORMULAS
self.ari = self.extract_ari()
self.cli = textstat.coleman_liau_index(self.text)
self.dcrs = textstat.dale_chall_readability_score_v2(self.text)
self.fre = textstat.flesch_reading_ease(self.text)
self.fkg = textstat.flesch_kincaid_grade(self.text)
self.len = len(self.tokens)
def cefr_conv(self, conversion):
"""Converts A1-C2 CEFR levels to sclae or categorical variables based on the conversion variable:
conversion = '6 categorical'
conversion = '6 numerical'
conversion = '3 categorical'
conversion = '3 numerical'"""
cefr_micro = ['A1', 'A2', 'B1', 'B2', 'C1', 'C2']
if conversion == '6 categorical':
return self.cefr
elif conversion == '6 numerical':
cefr_converted = cefr_micro.index(self.cefr)
return cefr_converted
elif conversion == '3 categorical':
cefr_macro = ['A', 'A', 'B', 'B', 'C', 'C']
cefr_converted = cefr_macro[cefr_micro.index(self.cefr)]
return cefr_converted
elif conversion == '3 numerical':
cefr_macro = [0, 0, 1, 1, 2, 2]
cefr_converted = cefr_macro[cefr_micro.index(self.cefr)]
return cefr_converted
def extract_text(self) -> str:
"""Converts txt file to a string."""
fhand = open(f'{self.dir}/{self.filename}', encoding='utf-8')
text = [line.strip() for line in fhand]
text = ' '.join(text)
fhand.close()
return text
def extract_sents_tokens(self) -> list:
"""Extracts a list of sentences consisting of a list of tokens."""
sents = self.sents.copy()
sents_tokens = []
for sent in sents:
tokens = nltk.word_tokenize(sent)
sents_tokens.append(tokens)
return sents_tokens
def sents_tokens_clean(self) -> list: # may want to change to lower case as well
"""PREPOCESSING: Removes the folowing:
1. number related
2. punctuation signs"""
sents_tokens = self.sents_tokens.copy()
# replace all tokens that start with number of punc with $removed$
for i in range(len(self.sents_tokens)):
for j in range(len(self.sents_tokens[i])):
if sents_tokens[i][j][0] in '0123456789!"%\'+,-.:;?_`':
sents_tokens[i][j] = '$removed$'
# remove all the $removed$
for i in range(len(sents_tokens)):
for j in range(sents_tokens[i].count('$removed$')):
sents_tokens[i].remove('$removed$')
return sents_tokens
def tokens_clean(self) -> list: # may not need
"""Removes numbers and punctuation marks from self.tokens"""
tokens = self.tokens
for i in range(len(self.tokens)):
if self.tokens[i][0] in '0123456789!"%\'+,-.:;?_`':
tokens[i] = '$removed$'
for i in range(tokens.count('$removed$')):
tokens.remove('$removed$')
return tokens
def pos_tagger(self) -> list:
"""Returns tuples of words and pos in sents_tokens format
The input is self.sents_tokens"""
all_pos = nltk.pos_tag_sents(self.sents_tokens)
return all_pos
# EXTRACT FEATURES
def extract_asl(self) -> float:
"""Extracts the average sentence length (ASL) feature in tokens
CAUTION: includes punctuations as tokens"""
sents_tokens = self.sents_tokens.copy()
sents_lens = [len(sent) for sent in sents_tokens]
return np.mean(sents_lens)
def extract_awl(self) -> float:
"""Extracts the average word length (AWL) feature
CAUTION: need to create a custom punctuation mark list that includes [’] for instance. not the same as [']"""
tokens = self.tokens
for token in tokens:
if token in punctuation:
tokens.remove(token)
tokens_lens = [len(token) for token in tokens]
return np.mean(tokens_lens)
def extract_avps(self) -> float:
"""Extracts the average verb per sentence (AVPS) feature"""
verb_tags = ['VB', 'VBG', 'BVD', 'VBN', 'VBP', 'VBZ']
all_pos = nltk.pos_tag_sents(self.sents_tokens)
verbs = [[]for sent in all_pos]
for i in range(len(all_pos)):
for j in range(len(all_pos[i])):
if all_pos[i][j][1] in verb_tags:
verbs[i].append(all_pos[i][j])
vps = [len(sent) for sent in verbs]
return np.mean(vps)
def extract_apps(self) -> float:
"""Extracts the average pronouns per sentence (APPS) feature"""
pronoun_tags = ['PRP', 'PRP$', 'WP']
all_pos = nltk.pos_tag_sents(self.sents_tokens)
pronouns = [[]for sent in all_pos]
for i in range(len(all_pos)):
for j in range(len(all_pos[i])):
if all_pos[i][j][1] in pronoun_tags:
pronouns[i].append(all_pos[i][j])
pps = [len(sent) for sent in pronouns]
return np.mean(pps)
def extract_ttr(self) -> float:
"""Extracts the type token ratio (TTR) feature
CAUTION: types and tokens include punctuation marks and arabic numbers"""
tokens = self.tokens
types = set(tokens)
ttr = len(types)/len(tokens)
return(ttr)
def extract_abv(self, kind) -> float: # incomplete NA part (may not need NA part)
"""Extracts the average bandwidth value (ABV) from 20k Academic English
kind = 'mean', 'min', 'max'"""
df = pd.read_csv('allWords - list.csv', index_col = 'word')
dic = []
na = [] # NA
for sent in self.sents_tokens_lemmatized:
for word in sent:
if (df.index == word.lower()).any() == True:
dic.append(df.loc[word.lower()].band)
else: # NA
na.append(word) # NA
dic_band = []
if kind == 'mean':
for i in dic:
dic_band.append(np.mean(i))
elif kind == 'min':
for i in dic:
if type(i) == pd.Series:
dic_band.append(min(i))
else:
dic_band.append(i)
elif kind == 'max':
for i in dic:
if type(i) == pd.Series:
dic_band.append(max(i))
else:
dic_band.append(i)
        dic_band_average = sum(dic_band) / len(dic_band)
return dic_band_average
def extract_ajcv(self, output):
"""Extracts the average CEFR-J value (AJCV) feature
output = 'ajcv'
output = 'na'
output = 'na_perc'
method: mean"""
df = pd.read_csv('jcefr_numerical.csv', index_col = 'headword')
dic = []
na = [] # NA
for word in self.tokens_stemmed:
if (df.index == word.lower()).any() == True:
dic.append(df.loc[word.lower()].CEFR)
else: # NA
na.append(word) # NA
cefrl = []
for i in dic:
cefrl.append(np.mean(i))
acv = np.mean(cefrl)
if output == 'ajcv':
return acv
elif output == 'na':
return na
elif output == 'na_perc':
perc = len(self.extract_ajcv('na')) / len(self.tokens_stemmed) * 100
return perc
def extract_bpera(self,):
"""Extracts the ratio of B words to A words (BPERA) feature
method: mean"""
df = pd.read_csv('jcefr_numerical.csv', index_col = 'headword')
dic = []
na = [] # NA
for word in self.tokens_stemmed:
if (df.index == word.lower()).any() == True:
dic.append(df.loc[word.lower()].CEFR)
else: # NA
na.append(word) # NA
cefrl = []
for i in dic:
cefrl.append(np.mean(i))
cefrab = []
for i in cefrl:
if i < 1.5:
cefrab.append(0)
elif i >= 1.5:
cefrab.append(1)
bpera = cefrab.count(1)/cefrab.count(0)
return bpera
def extract_ari(self):
"""Extracts the automated readability index (ARI) feature"""
grade_level = (0.5 * self.asl) + (4.71 * self.awl) - 21.43
return grade_level
# NOT FEATURES
def lemmatize(self) -> list:
"""Input is self.sents_tokens
Input must be list of list, i.e. list = [['list', 'must', 'be', 'like', 'this'], [...]]
Returns a lemmatized version of sents_tokens"""
lemmatizer = nltk.stem.WordNetLemmatizer()
lemmas = self.sents_tokens_cleaned.copy()
for i in range(len(self.sents_tokens)):
for j in range(len(lemmas[i])):
lemmas[i][j] = lemmatizer.lemmatize((self.sents_tokens[i][j]).lower())
return lemmas
def lemmatize_tokens(self) -> list:
"""Input is self.tokens
Returns a lemmatized version of tokens"""
lemmatizer = nltk.stem.WordNetLemmatizer()
lemmas = self.tokens.copy()
for i in range(len(self.tokens)):
lemma = lemmatizer.lemmatize(self.tokens[i])
lemmas[i] = lemma
return lemmas
def stem_tokens(self) -> list:
"""Input is self.tokens_lemmatized
Returns a stemmed version of the lemmatized tokens"""
df = pd.read_csv('verbs.csv')
stemmed = []
for token in self.tokens_lemmatized:
word = token.lower()
result = df.isin([word])
series = result.any()
cols = list(series[series == True].index)
all_rows = []
for col in cols:
rows = list(result[col][result[col] == True].index)
all_rows.append(rows)
if len(all_rows) == 0:
stem = word
else:
ind = all_rows[0][0]
stem = df[ind:ind+1].stem.values[0]
stemmed.append(stem)
return stemmed
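    # Illustrative usage of the Text class (a sketch): the filename below is a hypothetical
    # placeholder and is not an actual file shipped with the project.
    #
    #   t = Text('B1_sample_01.txt', directory='CEFR_texts')
    #   print(t.cefr, round(t.awl, 2), round(t.ttr, 2), round(t.ari, 2))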
def write_features(filename='training_data', directory='train_data', cefr_format='6 numerical'): # write what features
"""Text.write_features('training_data', 'train_data', '6 numerical')
General information about each text:
filename
cefr: cefr level of the text (formats include '6 categorical', '6 numerical', '3 categorical', '3 numerical')
Extracts features from texts in the 'CEFR_texts' folder:
The extracted features are:
ABVMAX, ABVMEAN, ABVMIN, AJCV, APPS, ARI, ASL, ASL.AVPS, ATTR, AVPS, AWL, BPERA, CLI, DCRS, FKG, FRE, JCPP, LEN, TTR
"""
counter = 1
all_texts = os.listdir(directory)
# empty lists for y and discriptions
file_names = []
cefr_levels = []
# empty lists for features
abvmax = []
abvmean = []
abvmin = []
ajcv = []
apps = []
ari = []
asl = []
aslavps = []
attr = []
avps = []
awl = []
bpera = []
cli = []
dcrs = []
fkg = []
fre = []
jcpp = []
length = []
ttr = []
for text in all_texts:
print(f'{counter} / {len(all_texts)}')
current_text = Text(text, directory=directory)
# Choosing cefr format
if cefr_format == '6 categorical':
cefr = current_text.cefr
elif cefr_format == '6 numerical':
cefr = current_text.cefr_micro_numerical
elif cefr_format == '3 categorical':
cefr = current_text.cefr_macro_categorical
elif cefr_format == '3 numerical':
cefr = current_text.cefr_macro_numerical
# Appending features for each text into the empty list:
file_names.append(current_text.filename)
cefr_levels.append(cefr)
abvmax.append(current_text.abvmax)
abvmean.append(current_text.abvmean)
abvmin.append(current_text.abvmin)
ajcv.append(current_text.ajcv)
apps.append(current_text.apps)
ari.append(current_text.ari)
asl.append(current_text.asl)
aslavps.append(current_text.asl*current_text.avps)
attr.append(current_text.attr)
avps.append(current_text.avps)
awl.append(current_text.awl)
bpera.append(current_text.bpera)
cli.append(current_text.cli)
dcrs.append(current_text.dcrs)
fkg.append(current_text.fkg)
fre.append(current_text.fre)
jcpp.append(current_text.jcpp)
length.append(current_text.len)
ttr.append(current_text.ttr)
counter += 1
# Converting lists into columnns of a dataframe, which is then converted into a .csv file
data_tuples = list(zip(file_names, cefr_levels, abvmax, abvmean, abvmin, ajcv, apps, ari, asl, aslavps, attr, avps, awl, bpera, cli, dcrs, fkg, fre, jcpp, length, ttr))
        df = pd.DataFrame(data_tuples, columns=['filename', 'cefr', 'abvmax', 'abvmean', 'abvmin', 'ajcv', 'apps', 'ari', 'asl', 'aslavps', 'attr', 'avps', 'awl', 'bpera', 'cli', 'dcrs', 'fkg', 'fre', 'jcpp', 'len', 'ttr'])
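        # Persist the assembled feature table. This write step is an assumption inferred from
        # the function's name and its `filename` argument; the original code may instead have
        # returned the DataFrame or written it elsewhere.
        df.to_csv(f'{filename}.csv', index=False)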
from pandas import read_table, DataFrame
from pytest import raises
from gsea_api import cudaGSEA
from gsea_api.gsea import GSEApy
from gsea_api.expression_set import ExpressionSet
from gsea_api.gsea.exceptions import GSEANoResults
from gsea_api.molecular_signatures_db import GeneSets
matrix = read_table('tests/expression_data.tsv', index_col='Gene')
classes = ['Control'] * 4 + ['Cancer'] * 4
def test_gsea_py():
gsea = GSEApy()
data = ExpressionSet(matrix.copy(), classes)
result = gsea.run(expression_data=data, **{'min-size': 1})
assert isinstance(result, DataFrame)
assert len(result) > 2
def test_cuda_gsea(monkeypatch):
gsea = cudaGSEA(path='cudaGSEA/cudaGSEA/src/cudaGSEA')
data = ExpressionSet(matrix.copy(), classes)
gene_sets = GeneSets.from_gmt('tests/sets_for_expression.gmt')
def mock_cuda_run(*args, **kwargs):
        return read_table('tests/cudaGSEA_subprocess_output.tsv')
import nose
import unittest
from numpy import nan
from pandas.core.daterange import DateRange
from pandas.core.index import Index, MultiIndex
from pandas.core.common import rands, groupby
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal)
from pandas.core.panel import WidePanel
from collections import defaultdict
import pandas.core.datetools as dt
import numpy as np
import pandas.util.testing as tm
# unittest.TestCase
def commonSetUp(self):
self.dateRange = DateRange('1/1/2005', periods=250, offset=dt.bday)
self.stringIndex = Index([rands(8).upper() for x in xrange(250)])
self.groupId = Series([x[0] for x in self.stringIndex],
index=self.stringIndex)
self.groupDict = dict((k, v) for k, v in self.groupId.iteritems())
self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])
randMat = np.random.randn(250, 5)
self.stringMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.stringIndex)
self.timeMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.dateRange)
class GroupByTestCase(unittest.TestCase):
setUp = commonSetUp
def test_python_grouper(self):
groupFunc = self.groupDict.get
groups = groupby(self.stringIndex, groupFunc)
setDict = dict((k, set(v)) for k, v in groups.iteritems())
for idx in self.stringIndex:
key = groupFunc(idx)
groupSet = setDict[key]
assert(idx in groupSet)
class TestGroupBy(unittest.TestCase):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B' : ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C' : np.random.randn(8),
'D' : np.random.randn(8)})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
def test_basic(self):
data = Series(np.arange(9) / 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
self.assertEqual(len(v), 3)
agged = grouped.aggregate(np.mean)
self.assertEqual(agged[1], 1)
assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
assert_series_equal(agged, grouped.mean())
# Cython only returning floating point for now...
assert_series_equal(grouped.agg(np.sum).astype(float),
grouped.sum())
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
value_grouped = data.groupby(data)
assert_series_equal(value_grouped.aggregate(np.mean), agged)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
agged = grouped.aggregate({'one' : np.mean,
'two' : np.std})
group_constants = {
0 : 10,
1 : 20,
2 : 30
}
agged = grouped.agg(lambda x: group_constants[x.groupName] + x.mean())
self.assertEqual(agged[1], 21)
# corner cases
self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
def test_series_agg_corner(self):
# nothing to group, all NA
result = self.ts.groupby(self.ts * np.nan).sum()
assert_series_equal(result, Series([]))
def test_aggregate_str_func(self):
def _check_results(grouped):
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg({'A' : 'var', 'B' : 'std', 'C' : 'mean'})
expected = DataFrame({'A' : grouped['A'].var(),
'B' : grouped['B'].std(),
'C' : grouped['C'].mean()})
assert_frame_equal(result, expected)
by_weekday = self.tsframe.groupby(lambda x: x.weekday())
_check_results(by_weekday)
by_mwkday = self.tsframe.groupby([lambda x: x.month,
lambda x: x.weekday()])
_check_results(by_mwkday)
def test_basic_regression(self):
# regression
T = [1.0*x for x in range(1,10) *10][:1095]
result = Series(T, range(0, len(T)))
groupings = np.random.random((1100,))
groupings = Series(groupings, range(0, len(groupings))) * 10.
grouped = result.groupby(groupings)
grouped.mean()
def test_transform(self):
data = Series(np.arange(9) / 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
transformed = grouped.transform(np.mean)
for name, group in grouped:
mean = group.mean()
for idx in group.index:
self.assertEqual(transformed[idx], mean)
def test_dispatch_transform(self):
df = self.tsframe[::5].reindex(self.tsframe.index)
filled = df.groupby(lambda x: x.month).fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
| assert_frame_equal(filled, expected) | pandas.util.testing.assert_frame_equal |
""" Test cases for DataFrame.plot """
import string
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
from pandas.io.formats.printing import pprint_thing
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFramePlotsSubplots(TestPlotBase):
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
for kind in ["bar", "barh", "line", "area"]:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
assert axes.shape == (3,)
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax, labels=[pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
if kind != "bar":
# change https://github.com/pandas-dev/pandas/issues/26714
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
assert ax.get_legend() is None
def test_subplots_timeseries(self):
idx = date_range(start="2014-07-01", freq="M", periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ["line", "area"]:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
def test_subplots_timeseries_y_axis(self):
# GH16953
data = {
"numeric": np.array([1, 2, 5]),
"timedelta": [
pd.Timedelta(-10, unit="s"),
pd.Timedelta(10, unit="m"),
pd.Timedelta(10, unit="h"),
],
"datetime_no_tz": [
pd.to_datetime("2017-08-01 00:00:00"),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00"),
],
"datetime_all_tz": [
pd.to_datetime("2017-08-01 00:00:00", utc=True),
pd.to_datetime("2017-08-01 02:00:00", utc=True),
pd.to_datetime("2017-08-02 00:00:00", utc=True),
],
"text": ["This", "should", "fail"],
}
testdata = DataFrame(data)
y_cols = ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"]
for col in y_cols:
ax = testdata.plot(y=col)
result = ax.get_lines()[0].get_data()[1]
expected = testdata[col].values
assert (result == expected).all()
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
testdata.plot(y="text")
@pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz")
def test_subplots_timeseries_y_axis_not_supported(self):
"""
This test will fail for:
period:
since period isn't yet implemented in ``select_dtypes``
and because it will need a custom value converter +
tick formatter (as was done for x-axis plots)
categorical:
because it will need a custom value converter +
tick formatter (also doesn't work for x-axis, as of now)
datetime_mixed_tz:
because of the way how pandas handles ``Series`` of
``datetime`` objects with different timezone,
generally converting ``datetime`` objects in a tz-aware
form could help with this problem
"""
data = {
"numeric": np.array([1, 2, 5]),
"period": [
pd.Period("2017-08-01 00:00:00", freq="H"),
pd.Period("2017-08-01 02:00", freq="H"),
pd.Period("2017-08-02 00:00:00", freq="H"),
],
"categorical": pd.Categorical(
["c", "b", "a"], categories=["a", "b", "c"], ordered=False
),
"datetime_mixed_tz": [
pd.to_datetime("2017-08-01 00:00:00", utc=True),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00"),
],
}
testdata = DataFrame(data)
ax_period = testdata.plot(x="numeric", y="period")
assert (
ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
).all()
ax_categorical = testdata.plot(x="numeric", y="categorical")
assert (
ax_categorical.get_lines()[0].get_data()[1]
== testdata["categorical"].values
).all()
ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
assert (
ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
== testdata["datetime_mixed_tz"].values
).all()
def test_subplots_layout_multi_column(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
msg = "Layout of 1x1 must be larger than required size 3"
with pytest.raises(ValueError, match=msg):
df.plot(subplots=True, layout=(1, 1))
msg = "At least one dimension of layout must be positive"
with pytest.raises(ValueError, match=msg):
df.plot(subplots=True, layout=(-1, -1))
@pytest.mark.parametrize(
"kwargs, expected_axes_num, expected_layout, expected_shape",
[
({}, 1, (1, 1), (1,)),
({"layout": (3, 3)}, 1, (3, 3), (3, 3)),
],
)
def test_subplots_layout_single_column(
self, kwargs, expected_axes_num, expected_layout, expected_shape
):
# GH 6667
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, **kwargs)
self._check_axes_shape(
axes,
axes_num=expected_axes_num,
layout=expected_layout,
)
assert axes.shape == expected_shape
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(
np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
)
df.plot(subplots=True, layout=(3, 2))
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3,)
assert returned[0].figure is fig
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3,)
assert returned[0].figure is fig
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
msg = "The number of passed axes must be 3, the same as the output plot"
with pytest.raises(ValueError, match=msg):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
        # an invalid layout should not affect the input and return value
        # (the warning itself is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))
returned = df.plot(
subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
returned = df.plot(
subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
returned = df.plot(
subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1,)
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(
np.random.randn(10, 9),
index=date_range(start="2014-07-01", freq="M", periods=10),
)
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
axes = df[["A", "B"]].plot(subplots=True)
df["C"].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=["a"])
assert len(ax.lines) == 1
tm.close()
axes = df.plot(subplots=True, secondary_y="a")
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=["a"])
assert len(ax.lines) == 1
tm.close()
ax = df.plot(secondary_y="a")
self._check_legend_labels(ax, labels=["a (right)"] * 5)
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([0.1, 1.0, 10.0, 100])
# no subplots
df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_bar_log_subplots(self):
expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True
)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
def test_boxplot_subplots_return_type(self, hist_df):
df = hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
assert isinstance(result, Series)
self._check_box_return_type(
result, None, expected_keys=["height", "weight", "category"]
)
for t in ["dict", "axes", "both"]:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(
returned,
t,
expected_keys=["height", "weight", "category"],
check_ax_title=False,
)
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(
np.random.randn(10, 2),
index=date_range("1/1/2000", periods=10),
columns=list("AB"),
)
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
| tm.close() | pandas._testing.close |
import pandas as pd
import numpy as np
import csv
from tqdm import trange
def clean(file_name,targets=['11612','11613']):
data = pd.read_csv(file_name)
data['result'].fillna(0,inplace=True)
data['result'] = data['result'].astype(int)
items = | pd.unique(data['item_id'].values) | pandas.unique |
# Necessary Libraries
import yfinance as yf, pandas as pd
import shutil
import os
import time
import glob
import numpy as np
import requests
from get_all_tickers import get_tickers as gt
from statistics import mean
from yahoo_fin import stock_info as si
# tickers = gt.get_tickers_filtered(mktcap_min=150000, mktcap_max=10000000)
tickers = si.tickers_sp500()
print("The amount of stocks chosen to observe: " + str(len(tickers)))
shutil.rmtree("/Users/shashank/Documents/Code/Python/Outputs/macd_accuracy/SMA_Analysis/Stocks/")
os.mkdir("/Users/shashank/Documents/Code/Python/Outputs/macd_accuracy/SMA_Analysis/Stocks/")
Amount_of_API_Calls = 0
Stock_Failure = 0
Stocks_Not_Imported = 0
i = 0
while (i < len(tickers)) and (Amount_of_API_Calls < 1800):
try:
print("Iteration = " + str(i))
stock = tickers[i]
temp = yf.Ticker(str(stock))
Hist_data = temp.history(period="max")
Hist_data.to_csv("/Users/shashank/Documents/Code/Python/Outputs/macd_accuracy/SMA_Analysis/Stocks/"+stock+".csv")
time.sleep(2)
Amount_of_API_Calls += 1
Stock_Failure = 0
i += 1
except ValueError:
print("Yahoo Finance Backend Error, Attempting to Fix")
if Stock_Failure > 5:
i+=1
Stocks_Not_Imported += 1
Amount_of_API_Calls += 1
Stock_Failure += 1
except requests.exceptions.SSLError as e:
print("Yahoo Finance Backend Error, Attempting to Fix SSL")
if Stock_Failure > 5:
i+=1
Stocks_Not_Imported += 1
Amount_of_API_Calls += 1
Stock_Failure += 1
print("The amount of stocks we successfully imported: " + str(i - Stocks_Not_Imported))
list_files = (glob.glob("/Users/shashank/Documents/Code/Python/Outputs/macd_accuracy/SMA_Analysis/Stocks/*.csv"))
Compare_Stocks = pd.DataFrame(columns=["Company", "Days_Observed", "Crosses", "True_Positive", "False_Positive", "True_Negative", "False_Negative", "Sensitivity",
"Specificity", "Accuracy", "TPR", "FPR"])
count = 0
for stock in list_files:
Hist_data = pd.read_csv(stock)
Company = ((os.path.basename(stock)).split(".csv")[0])
Days_Observed = 0
Crosses = 0
True_Positive = 0
False_Positive = 0
True_Negative = 0
False_Negative = 0
Sensitivity = 0
Specificity = 0
Accuracy = 0
prices = []
c = 0
while c < len(Hist_data):
if Hist_data.iloc[c,4] > float(2.00):
prices.append(Hist_data.iloc[c,4])
c += 1
prices_df = pd.DataFrame(prices)
    day12 = prices_df.ewm(span=12).mean()  # 12-period exponential moving average
    day26 = prices_df.ewm(span=26).mean()  # 26-period exponential moving average
macd = []
counter=0
while counter < (len(day12)):
macd.append(day12.iloc[counter,0] - day26.iloc[counter,0])
counter += 1
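    # Note: a vectorized equivalent of the loop above (an assumed alternative, not part of
    # the original script) would be `day12.iloc[:, 0] - day26.iloc[:, 0]`, i.e. the MACD
    # line computed as EMA(12) minus EMA(26).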
macd_df = | pd.DataFrame(macd) | pandas.DataFrame |
# Globals #
import re
import numpy as np
import pandas as pd
import dateutil.parser as dp
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import *
from itertools import islice
from scipy.stats import boxcox
from scipy.integrate import simps
from realtime_talib import Indicator
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from pprint import pprint
from selenium import webdriver
import time  # needed for time.sleep() in get_popularity() below
RANDOM_STATE = 42
# Sentiment Preprocessing
def remove_special_chars(headline_list):
"""
Returns list of headlines with all non-alphabetical characters removed.
"""
rm_spec_chars = [re.sub('[^ A-Za-z]+', "", headline) for headline in headline_list]
return rm_spec_chars
def tokenize(headline_list):
"""
Takes list of headlines as input and returns a list of lists of tokens.
"""
tokenized = []
for headline in headline_list:
tokens = word_tokenize(headline)
tokenized.append(tokens)
# print("tokenize")
# pprint(tokenized)
return tokenized
def remove_stop_words(tokenized_headline_list):
"""
Takes list of lists of tokens as input and removes all stop words.
"""
filtered_tokens = []
for token_list in tokenized_headline_list:
filtered_tokens.append([token for token in token_list if token not in set(stopwords.words('english'))])
# print("stop words")
# pprint(filtered_tokens)
return filtered_tokens
def stem(token_list_of_lists):
"""
Takes list of lists of tokens as input and stems every token.
Returns a list of lists of stems.
"""
stemmer = PorterStemmer()
stemmed = []
for token_list in token_list_of_lists:
# print(token_list)
stemmed.append([stemmer.stem(token) for token in token_list])
# print("stem")
# pprint(stemmed)
return stemmed
def make_bag_of_words(df, stemmed):
"""
Create bag of words model.
"""
print("\tCreating Bag of Words Model...")
very_pos = set()
slightly_pos = set()
neutral = set()
slightly_neg = set()
very_neg = set()
# Create sets that hold words in headlines categorized as "slightly_neg" or "slightly_pos" or etc
for stems, sentiment in zip(stemmed, df["Sentiment"].tolist()):
if sentiment == -2:
very_neg.update(stems)
elif sentiment == -1:
slightly_neg.update(stems)
elif sentiment == 0:
neutral.update(stems)
elif sentiment == 1:
slightly_pos.update(stems)
elif sentiment == 2:
very_pos.update(stems)
# Count number of words in each headline in each of the sets and encode it as a list of counts for each headline.
bag_count = []
for x in stemmed:
x = set(x)
bag_count.append(list((len(x & very_neg), len(x & slightly_neg), len(x & neutral), len(x & slightly_pos), len(x & very_pos))))
df["sentiment_class_count"] = bag_count
return df
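# Hedged illustration of the encoding above (assumed tokens, not from the original data):
# if a headline stems to {"market", "crash"} and only "crash" appears in the very_neg set,
# its sentiment_class_count entry is [1, 0, 0, 0, 0]: one count per sentiment bucket,
# ordered very_neg, slightly_neg, neutral, slightly_pos, very_pos.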
def sentiment_preprocessing(df):
"""
Takes a dataframe, removes special characters, tokenizes
the headlines, removes stop-tokens, and stems the remaining tokens.
"""
specials_removed = remove_special_chars(df["Headline"].tolist())
tokenized = tokenize(specials_removed)
tokenized_filtered = remove_stop_words(tokenized)
stemmed = stem(tokenized_filtered)
return df, stemmed
def headlines_balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\nSplitting headlines into *balanced* training and test sets...")
# pprint(list(dataset.values))
# pprint(dataset)
# Use sklearn.train_test_split to split all features into x_train and x_test,
# and all expected values into y_train and y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Sentiment", "Headline"], axis=1).values,
dataset["Sentiment"].values, test_size=test_size,
random_state=RANDOM_STATE)
x_train = [x[0] for x in x_train]
x_test = [x[0] for x in x_test]
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
train["Sentiment"] = pd.Series(y_train)
# Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
test["Sentiment"] = pd.Series(y_test)
train_prediction = train["Sentiment"].values
test_prediction = test["Sentiment"].values
train_trimmed = train.drop(["Sentiment"], axis=1).values
test_trimmed = test.drop(["Sentiment"], axis=1).values
return train_trimmed, test_trimmed, train_prediction, test_prediction
def split(dataset, test_size, balanced=True):
if balanced:
return headlines_balanced_split(dataset, test_size)
else:
# TODO: write imbalanced split function
return None
# Helpers #
def sliding_window(seq, n=2):
"""
Returns a sliding window (of width n) over data from the iterable. https://stackoverflow.com/a/6822773/8740440
"""
"s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ..."
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
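# Hedged usage sketch (assumed values, not from the original): sliding_window([1, 2, 3, 4], n=2)
# yields (1, 2), (2, 3), (3, 4); integrate() below consumes these fixed-width windows.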
def integrate(avg_daily_sentiment, interval):
"""
Takes a list of average daily sentiment scores and returns a list of definite integral estimations calculated
with Simpson's method. Each integral interval is determined by the `interval` variable. Shows accumulated sentiment.
"""
# Split into sliding window list of lists
sentiment_windows = sliding_window(avg_daily_sentiment, interval)
integral_simpson_est = []
# https://stackoverflow.com/a/13323861/8740440
for x in sentiment_windows:
# Estimate area using composite Simpson's rule. dx indicates the spacing of the data on the x-axis.
integral_simpson_est.append(simps(x, dx=1))
dead_values = list([None] * interval)
dead_values.extend(integral_simpson_est)
dead_values.reverse()
return dead_values
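# Hedged usage sketch (assumed values): integrate([0.1, 0.4, 0.3, 0.2, 0.5], interval=3)
# applies simps(dx=1) to each window of 3 scores, combines the estimates with `interval`
# None placeholders and reverses the list, so callers should expect gaps where no full
# window exists.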
def random_undersampling(dataset):
"""
    Randomly delete rows from the majority class until its row count
    equals the row count of the minority class.
"""
minority_set = dataset[dataset.Trend == -1.0]
majority_set = dataset[dataset.Trend == 1.0]
# print(dataset.Trend.value_counts())
# If minority set larger than majority set, swap
if len(minority_set) > len(majority_set):
minority_set, majority_set = majority_set, minority_set
# Downsample majority class
majority_downsampled = resample(majority_set,
replace=False, # sample without replacement
n_samples=len(minority_set), # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
return pd.concat([majority_downsampled, minority_set])
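# Hedged illustration (assumed counts, not from the original data): with 100 rows of
# Trend == 1.0 and 40 rows of Trend == -1.0, the majority class is resampled without
# replacement down to 40 rows, so the returned frame holds 80 balanced rows.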
def get_popularity(headlines):
# TODO: Randomize user-agents OR figure out how to handle popups
if "Tweets" not in headlines.columns:
counts = []
driver = webdriver.Chrome()
for index, row in headlines.iterrows():
try:
driver.get(row["URL"])
time.sleep(3)
twitter_containers = driver.find_elements_by_xpath("//li[@class='twitter']")
count = twitter_containers[0].find_elements_by_xpath("//span[@class='count']")
if count[0].text == "":
counts.append(1)
else:
counts.append(int(count[0].text))
except:
counts.append(1) # QUESTION: Should it be None?
headlines["Tweets"] = (pd.Series(counts)).values
print(counts)
return headlines
def balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\tSplitting data into *balanced* training and test sets")
# Use sklearn.train_test_split to split original dataset into x_train, y_train, x_test, y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Date", "Trend"], axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])
train["Trend"] = pd.Series(y_train)
# Do the same for x_test and y__test
test = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])
test["Trend"] = pd.Series(y_test)
# Apply random undersampling to both data frames
train_downsampled = random_undersampling(train)
test_downsampled = random_undersampling(test)
train_trend = train_downsampled["Trend"].values
test_trend = test_downsampled["Trend"].values
train_trimmed = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""
Randomly splits dataset into unbalanced training and test sets.
"""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""
Extracts technical indicators from OHLCV data.
"""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = (pd.Series(unix_times)).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv = temp_ohlcv.iloc[::-1]
# Rate of Change Ratio
rocr3 = ((Indicator(temp_ohlcv, "ROCR", 3)).getHistorical())[::-1]
rocr6 = ((Indicator(temp_ohlcv, "ROCR", 6)).getHistorical())[::-1]
# Average True Range
atr = ((Indicator(temp_ohlcv, "ATR", 14)).getHistorical())[::-1]
# On-Balance Volume
obv = ((Indicator(temp_ohlcv, "OBV")).getHistorical())[::-1]
# Triple Exponential Moving Average
trix = ((Indicator(temp_ohlcv, "TRIX", 20)).getHistorical())[::-1]
# Momentum
mom1 = ((Indicator(temp_ohlcv, "MOM", 1)).getHistorical())[::-1]
mom3 = ((Indicator(temp_ohlcv, "MOM", 3)).getHistorical())[::-1]
# Average Directional Index
adx14 = ((Indicator(temp_ohlcv, "ADX", 14)).getHistorical())[::-1]
adx20 = ((Indicator(temp_ohlcv, "ADX", 20)).getHistorical())[::-1]
# Williams %R
willr = ((Indicator(temp_ohlcv, "WILLR", 14)).getHistorical())[::-1]
# Relative Strength Index
rsi6 = ((Indicator(temp_ohlcv, "RSI", 6)).getHistorical())[::-1]
rsi12 = ((Indicator(temp_ohlcv, "RSI", 12)).getHistorical())[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = (Indicator(temp_ohlcv, "MACD", 12, 26, 9)).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = ((Indicator(temp_ohlcv, "MA", 6, 1)).getHistorical())[::-1]
ema12 = ((Indicator(temp_ohlcv, "MA", 12, 1)).getHistorical())[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv = ohlcv[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv["MOM (1)"], ohlcv["MOM (3)"], ohlcv["ADX (14)"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values
ohlcv["ADX (20)"], ohlcv["WILLR"], ohlcv["RSI (6)"] = ( | pd.Series(adx20[:min_length]) | pandas.Series |
import unittest
from unittest import mock
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from pandas.util.testing import assert_frame_equal
import tests.test_data as td
from shift_detector.checks.statistical_checks import numerical_statistical_check, categorical_statistical_check
from shift_detector.checks.statistical_checks.categorical_statistical_check import CategoricalStatisticalCheck
from shift_detector.checks.statistical_checks.numerical_statistical_check import NumericalStatisticalCheck
from shift_detector.checks.statistical_checks.text_metadata_statistical_check import TextMetadataStatisticalCheck
from shift_detector.detector import Detector
from shift_detector.precalculations.store import Store
from shift_detector.precalculations.text_metadata import NumCharsMetadata, NumWordsMetadata, \
DistinctWordsRatioMetadata, LanguagePerParagraph, UnknownWordRatioMetadata, StopwordRatioMetadata, LanguageMetadata
from shift_detector.utils.visualization import PlotData
class TestTextMetadataStatisticalCheck(unittest.TestCase):
def setUp(self):
self.poems = td.poems
self.phrases = td.phrases
def test_significant_metadata(self):
pvalues = pd.DataFrame([[0.001, 0.2]], columns=['num_chars', 'distinct_words_ratio'], index=['pvalue'])
result = TextMetadataStatisticalCheck(significance=0.01).significant_metadata_names(pvalues)
self.assertIn('num_chars', result)
self.assertNotIn('distinct_words_ratio', result)
def test_not_significant(self):
df1 = pd.DataFrame.from_dict({'text': self.poems})
df2 = pd.DataFrame.from_dict({'text': list(reversed(self.poems))})
store = Store(df1, df2)
result = TextMetadataStatisticalCheck().run(store)
self.assertEqual(1, len(result.examined_columns))
self.assertEqual(0, len(result.shifted_columns))
self.assertEqual(0, len(result.explanation))
def test_significant(self):
df1 = pd.DataFrame.from_dict({'text': self.poems})
df2 = pd.DataFrame.from_dict({'text': self.phrases})
store = Store(df1, df2)
result = TextMetadataStatisticalCheck([NumCharsMetadata(), NumWordsMetadata(),
DistinctWordsRatioMetadata(), LanguagePerParagraph()]
).run(store)
self.assertEqual(1, len(result.examined_columns))
self.assertEqual(1, len(result.shifted_columns))
self.assertEqual(1, len(result.explanation))
def test_compliance_with_detector(self):
df1 = pd.DataFrame.from_dict({'text': ['This is a very important text.',
'It contains information.', 'Brilliant ideas are written down.',
'Read it.', 'You will become a lot smarter.',
'Or you will waste your time.', 'Come on, figure it out!',
'Perhaps it will at least entertain you.', 'Do not be afraid.',
'Be brave!']})
df2 = pd.DataFrame.from_dict({'text': ['This is a very important text.',
'It contains information.', 'Brilliant ideas are written down.',
'Read it.', 'You will become a lot smarter.',
'Or you will waste your time.', 'Come on, figure it out!',
'Perhaps it will at least entertain you.', 'Do not be afraid.',
'Be brave!']})
detector = Detector(df1=df1, df2=df2, log_print=False)
detector.run(TextMetadataStatisticalCheck())
column_index = pd.MultiIndex.from_product([['text'], ['distinct_words', 'num_chars', 'num_words']],
names=['column', 'metadata'])
solution = pd.DataFrame([[1.0, 1.0, 1.0]], columns=column_index, index=['pvalue'])
self.assertEqual(1, len(detector.check_reports[0].examined_columns))
self.assertEqual(0, len(detector.check_reports[0].shifted_columns))
self.assertEqual(0, len(detector.check_reports[0].explanation))
assert_frame_equal(solution, detector.check_reports[0].information['test_results'])
def test_language_can_be_set(self):
check = TextMetadataStatisticalCheck([UnknownWordRatioMetadata(), StopwordRatioMetadata()], language='fr')
md_with_lang = [mdtype for mdtype in check.metadata_precalculation.text_metadata_types
if type(mdtype) in [UnknownWordRatioMetadata, StopwordRatioMetadata]]
for mdtype in md_with_lang:
self.assertEqual('fr', mdtype.language)
def test_infer_language_is_set(self):
check = TextMetadataStatisticalCheck([UnknownWordRatioMetadata(), StopwordRatioMetadata()], infer_language=True)
md_with_lang = [mdtype for mdtype in check.metadata_precalculation.text_metadata_types
if type(mdtype) in [UnknownWordRatioMetadata, StopwordRatioMetadata]]
for mdtype in md_with_lang:
self.assertTrue(mdtype.infer_language)
def test_figure_function_is_collected(self):
df1 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
df2 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
metadata_names = ['num_chars', 'num_words']
cols = pd.MultiIndex.from_product([df1.columns, metadata_names], names=['column', 'metadata'])
check = TextMetadataStatisticalCheck()
pvalues = | pd.DataFrame(columns=cols, index=['pvalue']) | pandas.DataFrame |
# Flask server
import sys
import os
import dateutil.relativedelta
from flask import Flask,request,Response
from multiprocessing import Process
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import json
from functools import wraps
import mpld3
# koapy
from koapy import KiwoomOpenApiPlusEntrypoint, KiwoomOpenApiPlusTrInfo
from pandas import Timestamp
import matplotlib.pyplot as plt
import pandas as pd
from exchange_calendars import get_calendar
# DB
from DataBase.SqliteDB import StockDB
# Custom
from datetime import datetime
import logging
# Telegram
import telepot
if not os.path.exists('log'):
os.mkdir('log')
fh = logging.FileHandler(filename=os.path.join('log', '{:%Y-%m-%d}.log'.format(datetime.now())),
encoding="utf-8")
format = '[%(asctime)s] I %(filename)s | %(name)s-%(funcName)s-%(lineno)04d I %(levelname)-8s > %(message)s'
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
logging.basicConfig(format=format, handlers=[fh, sh], level=logging.DEBUG)
########### init ###########
app = Flask(__name__)
server = Process()
stock_db = StockDB()
# 1. Create the entrypoint object
entrypoint = KiwoomOpenApiPlusEntrypoint()
# 2. Log in
print('Logging in...')
entrypoint.EnsureConnected()
logging.info('Logged in.')
base_account = entrypoint.GetAccountList()[0]
# 3. Store the KOSPI/KOSDAQ stock code lists
# Check the stock list (basic function call example)
print('Getting stock codes and names...')
codes = entrypoint.GetKospiCodeList()
names = [entrypoint.GetMasterCodeName(code) for code in codes]
codes_by_names_dict_kospi = dict(zip(names, codes))
names_by_codes_dict_kospi = dict(zip(codes, names))
codes = entrypoint.GetKosdaqCodeList()
names = [entrypoint.GetMasterCodeName(code) for code in codes]
codes_by_names_dict_kosdaq = dict(zip(names, codes))
names_by_codes_dict_kosdaq = dict(zip(codes, names))
logging.info('End stock codes and names...')
# 6. Order handling
krx_calendar = get_calendar('XKRX')
# 7. Register the Telegram bot
def getToken():
f = open("telebot.txt")
token = f.readline().strip()
userid = f.readline().strip()
f.close()
return (token , userid)
(token, chat_id) = getToken()
bot = telepot.Bot(token)
def as_json(f):
@wraps(f)
def decorated_function(*args, **kwargs):
res = f(*args, **kwargs)
res = json.dumps(res, ensure_ascii=False, indent=4).encode('utf8')
return Response(res, content_type='application/json; charset=utf-8')
return decorated_function
@app.route('/')
def home():
    # Check connection status (basic function call example)
print('Checking connection status...')
status = entrypoint.GetConnectState()
print('Connection status: %s', status)
return 'Kiwoom Bridge Made By Dotz'
@app.route('/disconnect', methods=['GET'])
def disconnect():
    # Release resources
entrypoint.close()
shutdown_server()
print('Server shutting down...')
@app.route('/myaccount', methods=['GET'])
def myaccount():
sAccNo = base_account
account = stock_db.load_account_table().to_html()
tname = 'account_detail_{}'.format(sAccNo)
account_detail = stock_db.load_account_detail_table(tname)
result = account + '</br></br>'
result += account_detail.to_html()
return result
@app.route('/stock_list/<kind>')
@as_json
def get_stock_list(kind):
if kind == 'kospi':
return names_by_codes_dict_kospi
elif kind == 'kosdaq':
return names_by_codes_dict_kosdaq
@app.route('/basic_info/<code>')
@as_json
def get_basic_info(code): # to be updated
print('Getting basic info of %s', code)
info = entrypoint.GetStockBasicInfoAsDict(code)
print('Got basic info data (using GetStockBasicInfoAsDict):')
return info
@app.route('/index_stock_data/<name>')
def get_index_stock_data(name):
# date, open, high, low, close, volume
tname = stock_db.getTableName(name)
result = stock_db.load(tname)
if result is None:
return ('', 204)
html = "<div style=\"position: relative;\"><h1 align=\"center\">"+name+"지수 차트</h1>"
result = result.astype({'date': 'str', 'open': 'int', 'high': 'int', 'low': 'int', 'close': 'int', 'volume': 'int'})
result['open'] = result['open'].apply(lambda _: _ / 100 if _ > 0 else _)
result['high'] = result['high'].apply(lambda _: _ / 100 if _ > 0 else _)
result['low'] = result['low'].apply(lambda _: _ / 100 if _ > 0 else _)
result['close'] = result['close'].apply(lambda _: _ / 100 if _ > 0 else _)
dates = pd.to_datetime(result['date'], format='%Y%m%d')
closes = pd.to_numeric(result['close'])
f = plt.figure()
plt.plot(dates, closes)
html += mpld3.fig_to_html(f, figid='Index_Chart')
html += '</br></br>'
html += result.to_html()
return html
@app.route('/daily_stock_data/<code>')
def get_daily_stock_data(code):
parameter = request.args.to_dict()
startdate = ''
if len(parameter) > 0 and 'startdate' in parameter.keys():
startdate = parameter['startdate']
html = "<div style=\"position: relative;\"><h1 align=\"center\">"+get_name_by_code(code)+" 종목차트</h1>"
#date, open, high, low, close, volume
tname = stock_db.getTableName(code)
if validate(startdate):
result = stock_db.load_detail(tname, startdate)
else:
result = stock_db.load_detail(tname)
if result is None:
return ('', 204)
dates = pd.to_datetime(result['date'], format='%Y%m%d')
closes = pd.to_numeric(result['close'])
f = plt.figure()
plt.plot(dates, closes)
html += mpld3.fig_to_html(f, figid='Stock_Chart')
html += '</br></br>'
html += result.to_html()
return html
@app.route('/daily_detail_stock_data/<code>')
def get_daily_detail_stock_data(code):
parameter = request.args.to_dict()
startdate = ''
if len(parameter) > 0 and 'startdate' in parameter.keys():
startdate = parameter['startdate']
html = "<div style=\"position: relative;\"><h1 align=\"center\">"+get_name_by_code(code)+" 종목 일봉 상세차트</h1>"
# date, open, high, low, close, volume
tname = stock_db.getTableName(code)
if validate(startdate):
result = stock_db.load_detail(tname, startdate)
else:
result = stock_db.load_detail(tname)
if result is None:
return ('', 204)
html += result.to_html()
return html
@app.route('/order/<code>/<count>/<action>')
def order(code, count, action):
'''
    :param sRQName: user request name, any distinguishable string
    :param sScreenNo: screen number
    :param sAccNo: 10-digit account number
    :param nOrderType: order type 1: new buy, 2: new sell, 3: cancel buy, 4: cancel sell, 5: amend buy, 6: amend sell
    :param sCode: stock code (6 digits)
    :param nQty: order quantity
    :param nPrice: order price, 0 for market price
    :param sHogaGb: trade (quote) type  'limit': '00', 'market': '03', 'conditional limit': '05', 'best-price limit': '06', 'top-priority limit': '07',
    'limit IOC': '10', 'market IOC': '13', 'best-price IOC': '16', 'limit FOK': '20', 'market FOK': '23', 'best-price FOK': '26',
    'pre-market off-hours close': '61', 'off-hours single price': '62', 'post-market off-hours close': '81'
    :param sOrgOrderNo: original order number. Leave blank for a new order; fill in when amending/cancelling
    :return:
    [Orders outside the regular session]
    Pre-market call auction orders
    08:30 ~ 09:00. Trade type 00: limit / 03: market (same as regular orders)
    * Orders placed 08:20 ~ 08:30 are held by Kiwoom and forwarded to the exchange in order at 08:30.
    Pre-market off-hours closing price
    08:30 ~ 08:40. Trade type 61: pre-market off-hours close. Enter price 0
    * Executed at the previous day's closing price. Unfilled orders are not cancelled automatically
    Closing call auction orders
    15:20 ~ 15:30. Trade type 00: limit / 03: market (same as regular orders)
    Post-market off-hours closing price
    15:40 ~ 16:00. Trade type 81: post-market off-hours close. Enter price 0
    * Executed at today's closing price
    Off-hours single price
    16:00 ~ 18:00. Trade type 62: off-hours single price. Enter a price
    * Matched every 10 minutes, traded within +/-10% of today's closing price
'''
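    # Hedged illustration of the parameters above (assumed example values): a market-price
    # buy of 10 shares of code '005930' would use nOrderType=1, nQty=10, nPrice=0,
    # sHogaGb='03' and an empty sOrgOrderNo.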
    # Set up the order parameters
    sRQName = code + ' 주식 '+action
    sScreenNo = '1000'  # screen number: any 4-digit number other than 0000
    sAccNo = entrypoint.GetAccountList()[0]  # 10-digit account number; GetFirstAvailableAccount(): first account found in the account list
nOrderType = 1 if action == 'buy' else 2 if action == 'sell' else 3
sCode = code
nQty = count
nPrice = 0
sHogaGb = '03'
sOrgOrderNo = ''
    # By default, keep listening for events until the order quantity is fully filled (simple call example)
if is_currently_in_session():
if action == 'hold':
logging.info('Holding action')
bot.sendMessage(chat_id=chat_id, text="%s %s" % (get_name_by_code(code), action))
return "종목 %s %s" % (code, action)
logging.info('Sending order to %s %s, quantity of %s stock, at market price...', action, code, count)
for event in entrypoint.OrderCall(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice,
sHogaGb, sOrgOrderNo):
print(event)
bot.sendMessage(chat_id=chat_id, text="%s %s개 %s" % (get_name_by_code(code), count, action))
update_account()
update_trading_record(code, get_name_by_code(code), count, action)
return "종목 %s %s개 %s주문" % (code, count, action)
else:
logging.info('Cannot send an order while market is not open, skipping...')
return 'Cannot send an order while market is not open, skipping...'
@app.route('/update_database')
def update_database():
print('Updating Database.......')
stock_list = []
index_dict = {"kospi": "001", "big": "002", "medium": "003", "small": "004", "kosdaq": "101", "kospi200": "201",
"kostar": "302", "krx100": "701"}
with open('stock_list.txt', 'r', encoding='utf-8') as f:
while True:
line = f.readline()
if not line: break
code = line.split(';')[0]
stock_list.append(code)
f.close()
    # Update the DB in one pass
    # save_account_info()  changed to run as an update after each stock trade instead
save_index_stock_data('kospi')
logging.info(stock_list)
for code in stock_list:
save_daily_stock_data(code)
print('Finishing Update Database.......')
return ('', 204)
@app.route('/update_account')
def update_account():
print('Updating Account.......')
save_account_info()
print('Finishing Update Account.......')
return ('', 204)
def is_currently_in_session():
now = Timestamp.now(tz=krx_calendar.tz)
previous_open = krx_calendar.previous_open(now).astimezone(krx_calendar.tz)
next_close = krx_calendar.next_close(previous_open).astimezone(krx_calendar.tz)
return previous_open <= now <= next_close
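# Hedged usage note: is_currently_in_session() compares the current XKRX-timezone timestamp
# with the previous open and its matching close, so it returns True during a trading session
# (e.g. an assumed 10:00 KST on a weekday) and False outside of it.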
def get_code_by_name(name):
if name in codes_by_names_dict_kospi.keys():
return codes_by_names_dict_kospi[name]
elif name in codes_by_names_dict_kosdaq.keys():
return codes_by_names_dict_kosdaq[name]
def get_name_by_code(code):
if code in names_by_codes_dict_kospi.keys():
return names_by_codes_dict_kospi[code]
elif code in names_by_codes_dict_kosdaq.keys():
return names_by_codes_dict_kosdaq[code]
def save_account_info():
print("Account is updating...")
actname = 'account_info'
if stock_db.checkTableName(actname) == False:
if stock_db.create_account_table() == False:
logging.debug('Account table create failed')
df = | pd.DataFrame(columns=['예수금', '출금가능금액', '총매입금액', '총평가금액', '총수익률(%)', '추정예탁자산']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import pytest
from pandas_appender import DF_Appender
# can append: df, series, dict-like, or list of these
# if you append a list of dicts, you end up with a column of objects
# always test ignore_index=True
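# Hedged sketch of the behaviour described above (assumed values, mirroring the tests below):
#   dfa = DF_Appender(ignore_index=True)
#   dfa.append({'a': 0}); dfa.append(pd.DataFrame([{'a': 1}]))
#   dfa.finalize()  # -> DataFrame with a single column 'a' containing [0, 1]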
def test_basics():
for a in range(1, 5):
dfa = DF_Appender(ignore_index=True)
for aa in range(a):
dfa.append({'a': aa})
df = dfa.finalize()
assert len(df) == a, 'appending dicts'
assert np.array_equal(df['a'].values, np.array(range(a)))
dfa = DF_Appender(df, ignore_index=True) # adding to the previous df
dfa.append({'a': a})
df = dfa.finalize()
assert len(df) == a + 1, 'appending dicts to previous df'
assert np.array_equal(df['a'].values, np.array(range(a+1)))
for a in range(1, 5):
dfa = DF_Appender()
for aa in range(a):
dfa.append({'a': aa})
df = dfa.finalize()
assert len(df) == a, 'appending dicts, ignore_index=False'
assert np.array_equal(df['a'].values, np.array(range(a)))
for a in range(1, 5):
dfa = DF_Appender(chunksize=1, ignore_index=True)
for aa in range(a):
dfa.append({'a': aa})
df = dfa.finalize()
print(df)
assert len(df) == a, 'appending dicts, minimum chunksize'
assert np.array_equal(df['a'].values, np.array(range(a)))
for a in range(1, 5):
dfa = DF_Appender(ignore_index=True)
for aa in range(a):
dfa.append(pd.Series([aa], name='a'))
df = dfa.finalize()
print(df)
assert len(df) == a, 'appending pd.Series of length 1'
#assert np.array_equal(df['a'].values, np.array(range(a))) # gets column name of '0'
for a in range(1, 5):
dfa = DF_Appender(ignore_index=True)
for aa in range(a):
dfa.append(pd.DataFrame([{'a': aa}]))
df = dfa.finalize()
assert len(df) == a, 'appending pd.Dataframe of length 1'
assert np.array_equal(df['a'].values, np.array(range(a)))
def test_hints():
dtypes = pd.Series({'a': 'float64', 'b': 'int64'})
dfa = DF_Appender(ignore_index=True, chunksize=2, dtypes=dtypes)
for aa in range(10):
dfa.append({'a': aa, 'b': aa})
df = dfa.finalize()
assert df.dtypes.equals(dtypes)
dtypes_dict = {'a': 'float64', 'b': 'int64'}
dfa = DF_Appender(ignore_index=True, chunksize=2, dtypes=dtypes_dict)
for aa in range(10):
dfa.append({'a': aa, 'b': aa})
df = dfa.finalize()
assert df.dtypes.equals(dtypes), 'dtype as dict works the same as dtype'
dtypes = | pd.Series({'a': 'category', 'b': 'int64'}) | pandas.Series |
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
import FEsingle  # local feature-engineering helper module used by the classes below (assumed to be importable)
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
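    # Hedged usage sketch (assumed file names): FEg30eom0110network().create('data/daily.csv', ...)
    # would write data/daily_FEg30eom0110network.csv on the first call and simply return that
    # path on later calls, since core() is skipped whenever the cached output already exists.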
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
class FEg30eom0110network(FEbase):
    # This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## Exclude the STAR Market (688 board)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after adjustment (adjusted close)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Whether halted (hit the daily limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actually high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute ranks for the three price ratios
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEg30eom0110onlinew6d(FEbase):
    # This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## Exclude the STAR Market (688 board)
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
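#total_mv is ranked cross-sectionally per trade_date (pct=True gives 0-1), lagged one day,
#then *19.9//1 buckets it into integer size buckets 0..19.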
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag limit-up (price-limit) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
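#pct_chg>9.4 approximates a close at the +10% daily limit; these rows are presumably filtered out
#later because such stocks could not realistically be bought at the close.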
###real price range (to distinguish high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute percentile ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
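#open/high/low are first converted to percent changes versus pre_close and then to cross-sectional
#percentile ranks; OldFeaturesRank (defined in FEsingle) presumably appends 1-day-lagged copies of
#the listed feature columns.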
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
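#PredictDaysTrend (from FEsingle) presumably builds the training label from the trend over the next
#5 trading days; the dropna above removes rows whose rolling windows are still incomplete.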
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FE_a23(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
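#InputChgSum (from FEsingle) presumably accumulates each money-flow column over rolling 5-, 12- and
#25-day windows, giving short-, mid- and long-horizon net-flow sums.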
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
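#day-of-week feature: trade_date (yyyymmdd) is parsed and mapped to 0=Monday ... 6=Sunday (0-4 on trading days).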
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
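#class1 encodes the listing board by code prefix: 1=ChiNext (30*), 2=Shanghai main board (60*),
#3=Shenzhen main board (00*); STAR Market (688*) codes were filtered out above.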
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag limit-up (price-limit) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute percentile ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
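#Hedged usage sketch (the file names below are placeholders, not files shipped with this project):
#core() expects the five CSV paths in this order, judging by the columns used above --
#   fe = FE_a23()
#   df_features = fe.core(['daily.csv',        # price/volume bars
#                          'adj_factor.csv',   # adjustment factors
#                          'stk_limit.csv',    # daily up/down limit prices
#                          'moneyflow.csv',    # money-flow data
#                          'daily_basic.csv']) # valuation / market-cap fields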
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- needs a double-check
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
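#real-time codes arrive as integers, so leading zeros are restored with zfill(6)
#(e.g. 600 -> '000600') before the prefix-based filters below.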
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
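#today's adjusted price is reconstructed from yesterday's adjusted close scaled by today's pct_chg,
#presumably because the latest adj_factor is not yet available in the real-time feed.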
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#flag limit-up (price-limit) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute percentile ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks that are too low-priced or thinly traded
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
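#for live scoring only the most recent trade_date is kept; the resulting feature rows are written
#to today_train.csv below.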
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
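#----------------------------------------------------------------------------------
#Hedged sketch, not this project's implementation: rollingRankSciPyB is defined elsewhere in this
#module. A rolling-rank helper of this kind typically returns the rank position of the newest value
#inside its window, roughly like the sketch below (the exact scaling used by rollingRankSciPyB may differ).
def rolling_rank_sketch(window):
    from scipy.stats import rankdata  # local import keeps the sketch self-contained
    #percentile position (0, 1] of the last (newest) observation within its rolling window
    return rankdata(window)[-1] / len(window)
#----------------------------------------------------------------------------------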
class FE_a29(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
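#the *_diff columns measure how the 25-, 12- and 5-day high/low/range ranks disagree, i.e. whether
#the price position versus its recent extremes is improving or fading as the lookback shrinks.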
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag limit-up (price-limit) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
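#same idea for momentum: differences between the 24/12/6/3-day cumulative-change ranks (and raw sums)
#capture whether short-horizon momentum is accelerating or decelerating relative to longer horizons.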
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute percentile ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- needs a double-check
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#flag limit-up (price-limit) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute percentile ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks that are too low-priced or thinly traded
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_Volatility(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag limit-up (price-limit) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute percentile ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- needs a double-check
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#flag limit-up (price-limit) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute percentile ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks that are too low-priced or thinly traded
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#whether ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#whether the stock hit a price limit (limit-up/limit-down)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguishes actually high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#remove stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
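# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): the calls to
# .rolling(20).apply(lambda x: rollingRankSciPyB(x)) above assume a helper that
# ranks the newest value inside each 20-day window. A minimal version of such a
# helper (the name and exact scaling are assumptions) might look like:
#
#   from scipy.stats import rankdata
#
#   def rollingRankSciPyB(window_values):
#       # rank of the last (most recent) element within its rolling window
#       return rankdata(window_values)[-1]
# --------------------------------------------------------------------------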
import itertools, sys, os
import csv
import glob
import numpy as np
import pandas as pd
import statistics
from statistics import mean
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import math
from itertools import tee
import collections
####define scoring scheme and important variables
id_list = ['V','X','Y','Z']
score_list = [10,5,-3,0]
keys = id_list
values = score_list
domain_dictionary = dict(zip(keys,values))
threshold = 0
window = 5000
sliiiiiide_to_the_right = 50
#Let's loop!
file1 = sys.argv[1]
with open(file1, 'r') as file:
print('Running file: '+ file.name)
count=0
count_start=-sliiiiiide_to_the_right
x_temp = list(file.read())
x = x_temp[:-1] #drop the trailing "\n" since we read the whole file rather than a single line
total_len = int(len(x))
#resume!
letter_list = [domain_dictionary[k] for k in x] #convert to scores
seq_score_nope = sum(letter_list)
#avg_score = mean(letter_list)
blocks = int(((len(x) - window) / sliiiiiide_to_the_right) + 1)
blocks_2 = blocks + 2 #need this otherwise the last (incomplete/little) block will be cut off!
#print("you will have " + str(blocks_2) + " windows")
cols = ['Window', 'Position start', 'Position stop','Pass/Fail', 'Score', 'V_count', 'X_count', 'Z_count', 'Y_count']
dat = pd.DataFrame(columns = cols)
#
for i in range(0, blocks_2 * sliiiiiide_to_the_right, sliiiiiide_to_the_right):
score_result = sum(letter_list[i:i+window])
new_let_list = x[i:i+window]
if score_result >= 0 :
PF_result = "pass"
else:
PF_result = "fail"
#counts for later
V_count = new_let_list.count('V')
X_count = new_let_list.count('X')
Z_count = new_let_list.count('Z')
Y_count = new_let_list.count('Y')
#vars for count columns
count = count +1
count_start += sliiiiiide_to_the_right #same as c_s = c_s + siiii...
count_stop = count_start+window
#dat.index.name = 'Window'
#let's plot things!
dat = dat.append({'Window': count,'Position start' : count_start, 'Position stop': count_stop,'Pass/Fail': PF_result, 'Score': score_result, 'V_count': V_count,
'X_count': X_count, 'Y_count': Y_count, 'Z_count': Z_count},ignore_index=True)
#dat.index.name = 'Window'
outname = (str(file.name)+".tableout.tsv")
#dat.to_csv(outname, sep='\t', index=False)
#FIGURES
pdf_outname = (str(file.name)+".figures.pdf")
#Character counts plot
#figures = PdfPages(pdf_outname)
dat.to_csv(outname, sep='\t', index=False)
#MAIN DATAFRAME CREATED, STORED IN DAT
#Now let's make the smoothed plot
df_0 = dat
#median_0 = df_0['Annotation'].median()
x = df_0['Window']
y = np.array(df_0['Score'])
l = df_0['Window'].count()
df_empty = pd.DataFrame(index=range(l),columns=range(1))
for col in df_empty.columns:
df_empty[col].values[:] = 0
zero=df_empty[0]
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
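#quick sanity example of the box filter above (illustrative values):
#smooth([0, 0, 10, 0, 0], 3) -> [0.0, 3.33, 3.33, 3.33, 0.0] (approximately),
#i.e. each point becomes the mean of itself and its neighbours inside the box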
#smooth_val == box_plts
smooth_val = 100 #####we can change this if we want!
#statement for handling short sequences (error called if len(y) < smoothing value)
if len(y) <= smooth_val:
smooth_val = int(0.5 * len(y)) #keep it an integer so np.ones() inside smooth() gets a valid size
else:
smooth_val = smooth_val
smoth = smooth(y,smooth_val)
idx = np.argwhere(np.diff(np.sign(zero - smoth))).flatten()
df = pd.DataFrame(zero[idx])
df = df.reset_index()
#we will save to figures, but first we need to do the validation steps
#This is for validating if region is + or -
df.loc[-1] = 1 # adding a row for first position
df.index = df.index + 1 # shifting index
df = df.sort_index()
#df.iloc[-1] = len(y)
#last position as last row
#print(df['index'])
df = df.sort_values(by=['index']) #need to sort first otherwise the +1 below will break things
new_list = pd.DataFrame(df['index'] + 1) #df['index'][:-1] + 1 #add +1 to every position so the next position tells us +/-; the last position would throw an error, so it gets dropped here and added back in later
#print(new_list)
#the_val_to_add = df.iloc[-1] - 1
#new_list = new_list.append(df.iloc[-1] - 1) #because of the +1 transformation a few lines above
new_list_2 = new_list['index']
#new_list = new_list.append(last_val_to_append, ignore_index=True)
new_y_val = list(smoth[new_list]) #find position y on smooth line
#assigning pos / neg for that +1 position
pos_neg_results = []
for i in new_y_val:
if i > 0:
result = '+'
else:
result = '-'
pos_neg_results.append(result)
#pos_neg_results.append('N/A') #the last value needs this - not anymore
#print(pos_neg_results)
#creating dataframe for next steps
df.drop(df.columns[len(df.columns)-1], axis=1, inplace=True) #to delete last column, unnamed so tricky to get rid of (?) this does it tho
df['+/- to the right'] = pos_neg_results
#print(df['+/- to the right'])
#append +/- and start stop coords from original table
df.rename(columns={'index': 'Window'}, inplace=True)
df['Window']=df['Window'].astype(int)
df_0['Window']=df_0['Window'].astype(int)
merged_df = df.merge(df_0, how = 'inner', on = ['Window'])
merged_df = merged_df.drop(['Pass/Fail','Score','V_count','X_count','Z_count','Y_count'], axis = 1)
merged_df['Chunk_end'] = 'none'
merged_df['Window midpoint'] = merged_df.iloc[:,[2,3]].median(axis=1)
merged_df['Window midpoint'] = merged_df['Window midpoint'].astype(int)
#df edits to accomodate this:
#we are duplicating the last row of the df to handle a trailing + chunk (w/ no y=0 intercept to close the chunk)
merged_df = merged_df.append(merged_df[-1:])
#now need to make it read the actual last stop position (this is not rounded per window like the other coords)
merged_df = merged_df.replace(merged_df.iloc[-1][3],(total_len+1))
print(merged_df)
#now let's get the coordinates for the > 0 'chunks'
#iterate over for true hit testing
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
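#example of the helper above: list(pairwise([1, 2, 3, 4])) -> [(1, 2), (2, 3), (3, 4)]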
#file name to be used later
actual_file_name_temp = str(file.name[:-17])
#this is to define the chunks, accounting for all the ways the graph can look
#note: leading and trailing here mean a chunk at the start or end of the graph that is not closed off by a y = 0 crossing on that side
ddf_list = []
for (i1, row1), (i2, row2) in pairwise(merged_df.iterrows()):
#for a leading chunk
if row1['+/- to the right'] == '+' and \
row1["Position start"] == 0 and \
row1["Position stop"] != (total_len + 1):
ddf = ["Chunk_" + str(i1), row1["Position start"], row2["Window midpoint"]]
ddf_list.append(ddf)
#for a contained chunk
if row1['+/- to the right'] == '+' and \
row1["Position start"] != 0 and \
row1["Position stop"] != (total_len + 1):
ddf = ["Chunk_" + str(i1), row1["Window midpoint"], row2["Window midpoint"]]
ddf_list.append(ddf)
#3. for a trailing chunk
if row1['+/- to the right'] == '+' and \
row1["Position start"] != 0 and \
row1["Position stop"] == (total_len + 1): #old = merged_df.iloc[0,3]
ddf = ["Chunk_" + str(i1), row1["Window midpoint"], row2["Position stop"]]
ddf_list.append(ddf)
#4. for graphs with no leading and no trailing chunk (for graphs with no y = 0 intercept -> this is
#a differently-defined statement below b/c the empty file gets appended w/ stuff above from older files when
#it's in the loop, ALSO the criterion gets fulfilled by contained chunks which means duplicate csv rows for chunks (defined differently to specify the rules)
if merged_df.iloc[0,1] == '+' and \
merged_df.iloc[0,2] == 0 and \
merged_df.iloc[0,3] == (total_len + 1): #if first column last(2nd row) == last -1 then its one chunk
rep_list = [('Chunk_0', '0', (total_len+1))]
ddf_list = rep_list
else:
ddf_list = ddf_list
#print(merged_df)
#print(ddf_list)
#make chunk csv
df = pd.DataFrame(ddf_list)
this_name = str(file.name+"_chunk_coordinates.csv") #used to be fna_name
df.to_csv(this_name, index = False)
###Find optimal location on plot to place validation marker
#read in virus table
#file_name_just_stem = file.name[:-4]
vir_bait_table = str(actual_file_name_temp+'.VIRUS_BAIT_TABLE.txt')
with open(vir_bait_table, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
lines = list(reader)
vir_bait_table = pd.DataFrame(lines)
vir_bait_table['median'] = round(vir_bait_table[[1,2]].median(axis=1))
vir_bait_table_med_list = list(vir_bait_table['median'])
#print(vir_bait_table_med_list)
points_list = []
for item in vir_bait_table_med_list:
eq = round(((item - 2500) + 50) / 50)
if eq >= len(x):
plot_point = (len(x) - 1) #1 because it can't = len, has to be less
else:
plot_point = eq
#plot_point = round(((item - 2500) + 50) / 50) #this must stay at = window length (not half like we had talked about, it makes illogical values...basically if the coordinate is towards the end, applying a window 'inbetween' can be out of bounds)
points_list.append(plot_point)
new_points_list = [1 if i <=0 else i for i in points_list]
#print(points_list) #each item represents/is the best/closest window that captures the viral hallmark region
zero=df_empty[0]
figures = PdfPages(pdf_outname)
x2 = (points_list)
plt.plot(x, y, 'o', ms=0.6)
plt.axhline(0, 0, l)
#plt.plot(x, smooth(y,3), 'r-', lw=2)
#p = smooth(y,100)
plt.plot(x, smooth(y,100), 'c', lw=2)
plt.plot(x, smooth(y,100), 'y', markevery = (new_points_list), ms=11.0, marker = '*')
plt.title("Viral region calls")
plt.xlabel('Window')
plt.ylabel('Score')
plt.rc('axes', titlesize=6.8) # fontsize of the axes title
plt.rc('xtick', labelsize=5) # fontsize of the tick labels
plt.rc('ytick', labelsize=5) # fontsize of the tick labels
plt.rc('legend', fontsize=5) # legend fontsize
plt.rc('figure', titlesize=8) # fontsize of the figure title
plt.grid(True)
idx = np.argwhere(np.diff(np.sign(zero - smooth(y,100)))).flatten()
plt.plot(x[idx], zero[idx], 'ro', ms=5.0)
#plt.plot(x[idx], zero[idx], markevery= (points_list), ms=9.0, marker = 'X', color = 'y')
#plt.plot()
df = pd.DataFrame(zero[idx])
import calendar
from struct import unpack, calcsize
import numpy as np
import pandas as pd
import os
from phildb.constants import METADATA_MISSING_VALUE
from phildb.log_handler import LogHandler
def __read(filename):
field_names = ["date", "value", "metaID"]
entry_format = "<qdi" # long, double, int; See field names above.
entry_size = calcsize(entry_format)
if not os.path.exists(filename):
return pd.DataFrame(None, columns=["date", "value", "metaID"])
records = np.fromfile(
filename, dtype=np.dtype({"names": field_names, "formats": entry_format[1:]})
)
if len(records) == 0:
return pd.DataFrame(None, columns=["date", "value", "metaID"])
df = pd.DataFrame(records, columns=field_names)
df["date"] = | pd.to_datetime(df["date"], unit="s") | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[87]:
import pandas as pd
import numpy as np
import scipy.stats as stats
from datetime import datetime
from datetime import date
import csv
# In[116]:
test = pd.read_csv('./FY1419/TrusteeFY1419P10.csv')
# In[117]:
values={'Ceased Date':date.today()}
# In[118]:
test=test.rename(columns={"BN/Registration Number": "BN/Registration Number"})
# In[119]:
print(test)
# In[120]:
test['Appointed Date'] = pd.to_datetime(test['Appointed Date'],format='%m/%d/%Y %H:%M')
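# (assumption) the `values` dict defined above is presumably intended for a
# fill step such as: test = test.fillna(value=values)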
"""util class for doing searches"""
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from typing import Dict, List
from operator import attrgetter
import pandas as pd
import numpy as np
from dfcx_scrapi.core import scrapi_base
from dfcx_scrapi.core import intents
from dfcx_scrapi.core import flows
from dfcx_scrapi.core import pages
from dfcx_scrapi.core import entity_types
from dfcx_scrapi.core import transition_route_groups
from google.cloud.dialogflowcx_v3beta1 import types
from google.oauth2 import service_account
# logging config
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
class SearchUtil(scrapi_base.ScrapiBase):
"""class for searching items"""
def __init__(
self,
creds_path: str = None,
creds_dict: Dict[str, str] = None,
creds: service_account.Credentials = None,
scope=False,
agent_id: str = None,
):
super().__init__(
creds_path=creds_path,
creds_dict=creds_dict,
creds=creds,
scope=scope,
)
logging.info("create dfcx creds %s", creds_path)
self.intents = intents.Intents(
creds_path=creds_path, creds_dict=creds_dict
)
self.entities = entity_types.EntityTypes(
creds_path=creds_path, creds_dict=creds_dict
)
self.flows = flows.Flows(creds_path=creds_path, creds_dict=creds_dict)
self.pages = pages.Pages(creds_path=creds_path, creds_dict=creds_dict)
self.route_groups = transition_route_groups.TransitionRouteGroups(
creds_path, creds_dict
)
self.creds_path = creds_path
self.intents_map = None
if agent_id:
self.agent_id = agent_id
self.flow_map = self.flows.get_flows_map(
agent_id=agent_id, reverse=True
)
self.intents_map = self.intents.get_intents_map(agent_id)
self.client_options = self._set_region(agent_id)
@staticmethod
def get_route_df(page_df: pd.DataFrame, route_group_df: pd.DataFrame):
"""Gets a route dataframe from page- and route-group-dataframes.
Args:
page_df: dataframe with required columns flow_name, page_name,
routes (where routes are types.TransitionRoute) such as
from get_page_df().
route_group_df: dataframe with required columns flow_name,
page_name, route_group_name, routes (where routes are
types.TransitionRoute) such as from get_route_group_df().
Returns:
routes dataframe with columns:
flow_name,
page_name,
route_group_name,
intent,
condition,
trigger_fulfillment
"""
routes_df = (
pd.concat(
[page_df[["flow_name", "page_name", "routes"]], route_group_df],
ignore_index=True,
)
.explode("routes", ignore_index=True)
.dropna(subset=["routes"], axis="index")
.assign(
intent=lambda df: df.routes.apply(attrgetter("intent")),
condition=lambda df: df.routes.apply(attrgetter("condition")),
trigger_fulfillment=lambda df: df.routes.apply(
attrgetter("trigger_fulfillment")
),
)
.drop(columns="routes")
)
return routes_df
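# Hypothetical usage sketch for get_route_df above (get_page_df() and
# get_route_group_df() are the helpers referenced in its docstring; the
# exact way those frames are produced is assumed here):
#   routes_df = SearchUtil.get_route_df(page_df, route_group_df)
#   routes_df[["flow_name", "page_name", "intent", "condition"]]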
@staticmethod
def get_param_df(page_df: pd.DataFrame):
"""Gets a parameter dataframe from an input page dataframe.
Args:
page_df: dataframe with minimum columns flow_name, page_name,
parameters (types.Form.Parameter), such as from get_page_df().
Returns:
dataframe with columns:
flow_name,
page_name,
parameter_name,
reprompt_event_handlers,
initial_prompt_fulfillment
"""
param_df = (
page_df[["flow_name", "page_name", "parameters"]]
.explode("parameters", ignore_index=True)
.dropna(subset=["parameters"], axis="index")
.assign(
parameter_name=lambda df: df.parameters.apply(
attrgetter("display_name")
),
reprompt_event_handlers=lambda df: df.parameters.apply(
attrgetter("fill_behavior.reprompt_event_handlers")
),
initial_prompt_fulfillment=lambda df: df.parameters.apply(
attrgetter("fill_behavior.initial_prompt_fulfillment")
),
)
.drop(columns="parameters")
)
return param_df
@staticmethod
def get_event_handler_df(page_df, param_reprompt_event_handler_df):
"""Gets an event handler dataframe from page- and parameter-dataframes.
Args:
page_df: dataframe with minimum columns flow_name, page_name,
event_handlers (types.EventHandler), such as from
get_page_df().
param_reprompt_event_handler_df: dataframe with minimum columns
flow_name, page_name, parameter_name, reprompt_event_handlers
(types.EventHandler), such as from get_param_df().
Returns:
dataframe with columns: flow_name, page_name, parameter_name, event,
trigger_fulfillment.
"""
event_handler_df = (
pd.concat(
[
page_df[["flow_name", "page_name", "event_handlers"]],
param_reprompt_event_handler_df.rename(
columns={"reprompt_event_handlers": "event_handlers"}
),
],
ignore_index=True,
)
.explode("event_handlers", ignore_index=True)
.dropna(subset=["event_handlers"], axis="index")
.assign(
event=lambda df: df.event_handlers.apply(attrgetter("event")),
trigger_fulfillment=lambda df: df.event_handlers.apply(
attrgetter("trigger_fulfillment")
),
)
.drop(columns="event_handlers")
)
return event_handler_df
@staticmethod
def _get_msg_type(message: types.ResponseMessage):
"""Gets the response message type for a message from a fulfillment.
Args:
message: message structure from a fulfillment.
Returns:
type in {np.nan, text, custom_payload, play_audio,
live_agent_handoff, conversation_success, output_audio_text}.
"""
if pd.isna(message):
value = np.nan
elif isinstance(message, types.ResponseMessage) and (
str(message) == ""
):
value = np.nan
elif "text" in message:
value = "text"
elif "payload" in message:
value = "custom_payload"
elif "play_audio" in message:
value = "play_audio"
elif "live_agent_handoff" in message:
value = "live_agent_handoff"
elif "conversation_success" in message:
value = "conversation_success"
elif "output_audio_text" in message:
value = "output_audio_text"
else:
value = "unexpected value"
return value
@staticmethod
def _gather_text_responses(text_message: types.ResponseMessage.Text):
"""Flattens a Dialogflow CX text structure.
Args:
text_message: text such as is inside types.ResponseMessage.
Returns:
flattened text in a string.
"""
flat_texts = "\n".join(text_message.text)
return flat_texts
def _format_response_message(
self, message: types.ResponseMessage, message_format: str
):
"""Conditionally unpacks message formats.
Args:
message: structure such as from a fulfillment.
message_format: 'dict' or 'human-readable'
Returns:
unpacked contents of message.
"""
if pd.isna(message):
contents = np.nan
elif isinstance(message, types.ResponseMessage) and (
str(message) == ""
):
contents = np.nan
elif "payload" in message:
c = self.recurse_proto_marshal_to_dict(message.payload)
contents = {"payload": c} if (message_format == "dict") else c
elif "play_audio" in message:
c = {"audio_uri": message.play_audio.audio_uri}
contents = {"play_audio": c} if (message_format == "dict") else c
elif "live_agent_handoff" in message:
c = self.recurse_proto_marshal_to_dict(
message.live_agent_handoff.metadata
)
contents = (
{"live_agent_handoff": c} if (message_format == "dict") else c
)
elif "conversation_success" in message:
c = self.recurse_proto_marshal_to_dict(
message.conversation_success.metadata
)
contents = (
{"conversation_success": c} if (message_format == "dict") else c
)
elif "output_audio_text" in message:
c = message.output_audio_text.text
contents = (
{"output_audio_text": c} if (message_format == "dict") else c
)
elif "text" in message:
c = SearchUtil._gather_text_responses(message.text)
contents = {"text": c} if (message_format == "dict") else c
else:
contents = message
return contents
def _find_true_routes_flow_level(self, flow_display_name, flow_map):
flow_id = flow_map[flow_display_name]
start_page = self.flows.get_flow(flow_id) # pylint: disable=W0612
other_pages = self.pages.list_pages(flow_id)
# Start page - no entry fulfillment
pages_dataframe = pd.DataFrame()
for page in other_pages:
display_name = page.display_name
webhook = False
if page.entry_fulfillment.webhook:
webhook = True
has_parameters = False
if page.form.parameters:
has_parameters = True
has_true_route = False
has_true_final_route = False
for route in page.transition_routes:
if route.condition == "true":
has_true_route = True
if route.condition == '$page.params.status = "FINAL" AND true':
has_true_final_route = True
page_dataframe = pd.DataFrame(
columns=[
"flow_display_name",
"page_display_name",
"webhook_entry_fullfillment",
"has_parameters",
"has_true_route",
"has_true_and_final_route",
],
data=[
[
flow_display_name,
display_name,
webhook,
has_parameters,
has_true_route,
has_true_final_route,
]
],
)
pages_dataframe = pages_dataframe.append(page_dataframe)
return pages_dataframe
# Flows - event handlers
def _flow_level_handlers(self):
flows_in_agent = self.flows.list_flows(self.agent_id)
flow_event_handler_data = pd.DataFrame()
import pandas as pd
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QPlainTextEdit, QSlider, QWidget, QVBoxLayout, QLabel, \
QHBoxLayout, QPushButton
from util.langUtil import check_if_valid_timestr
def get_datatable_sheet(table: QTableWidget):
map = []
for i in range(table.rowCount()):
row = []
for u in range(table.columnCount()):
if table.item(i, u):
row.append(table.item(i, u).text())
else:
row.append("")
map.append(row)
# Ignore not-full rows AND symbol/interval not allowed!
map = [row for row in map if row[0] and row[1] and row[2]
and check_if_valid_timestr(row[1]) and check_if_valid_timestr(row[2])]
data = {
'symbol': [row[0] for row in map],
'interval': [row[1] for row in map],
'period': [row[2] for row in map],
}
df = pd.DataFrame(data)
return df
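# usage sketch (assuming a populated QTableWidget named `table`):
#   df = get_datatable_sheet(table)
#   # -> DataFrame with columns ['symbol', 'interval', 'period'], one row per
#   #    fully filled table row whose interval/period pass check_if_valid_timestr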
def get_datatable_sheet_all(table: QTableWidget):
map = []
for i in range(table.rowCount()):
row = []
for u in range(table.columnCount()):
if table.item(i, u):
row.append(table.item(i, u).text())
else:
row.append("")
map.append(row)
map = [row for row in map if row[0] or row[1] or row[2]]
data = {
'symbol': [row[0] for row in map],
'interval': [row[1] for row in map],
'period': [row[2] for row in map],
}
df = pd.DataFrame(data)
return df
def get_datatable_sheet_col(table: QTableWidget, col: int):
map = []
for i in range(table.rowCount()):
item = table.item(i, col)
map.append(item.text() if item else "")
# Stop at a blank
if not map[-1]:
break
map = [row for row in map if row]
data = {
'data': [row for row in map],  # full cell text for the requested column
}
df = pd.DataFrame(data)
"""PyStan utility functions
These functions validate and organize data passed to and from the
classes and functions defined in the file `stan_fit.hpp` and wrapped
by the Cython file `stan_fit.pxd`.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2015, PyStan developers
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
#-----------------------------------------------------------------------------
# REF: rstan/rstan/R/misc.R
from __future__ import unicode_literals, division
from pystan._compat import PY2, string_types
from collections import OrderedDict
if PY2:
from collections import Callable, Iterable, Sequence
else:
from collections.abc import Callable, Iterable, Sequence
import inspect
import io
import itertools
import logging
import math
from numbers import Number
import os
import random
import re
import sys
import shutil
import tempfile
import time
import numpy as np
try:
from scipy.stats.mstats import mquantiles
except ImportError:
from pystan.external.scipy.mstats import mquantiles
import pystan.chains
import pystan._misc
from pystan.constants import (MAX_UINT, sampling_algo_t, optim_algo_t,
variational_algo_t, sampling_metric_t, stan_args_method_t)
logger = logging.getLogger('pystan')
def stansummary(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2):
"""
Summary statistic table.
Parameters
----------
fit : StanFit4Model object
pars : str or sequence of str, optional
Parameter names. By default use all parameters
probs : sequence of float, optional
Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975)
digits_summary : int, optional
Number of significant digits. By default, 2
Returns
-------
summary : string
Table includes mean, se_mean, sd, probs_0, ..., probs_n, n_eff and Rhat.
Examples
--------
>>> model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
>>> m = StanModel(model_code=model_code, model_name="example_model")
>>> fit = m.sampling()
>>> print(stansummary(fit))
Inference for Stan model: example_model.
4 chains, each with iter=2000; warmup=1000; thin=1;
post-warmup draws per chain=1000, total post-warmup draws=4000.
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
y 0.01 0.03 1.0 -2.01 -0.68 0.02 0.72 1.97 1330 1.0
lp__ -0.5 0.02 0.68 -2.44 -0.66 -0.24 -0.05-5.5e-4 1555 1.0
Samples were drawn using NUTS at Thu Aug 17 00:52:25 2017.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).
"""
if fit.mode == 1:
return "Stan model '{}' is of mode 'test_grad';\n"\
"sampling is not conducted.".format(fit.model_name)
elif fit.mode == 2:
return "Stan model '{}' does not contain samples.".format(fit.model_name)
n_kept = [s - w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
header = "Inference for Stan model: {}.\n".format(fit.model_name)
header += "{} chains, each with iter={}; warmup={}; thin={}; \n"
header = header.format(fit.sim['chains'], fit.sim['iter'], fit.sim['warmup'],
fit.sim['thin'], sum(n_kept))
header += "post-warmup draws per chain={}, total post-warmup draws={}.\n\n"
header = header.format(n_kept[0], sum(n_kept))
footer = "\n\nSamples were drawn using {} at {}.\n"\
"For each parameter, n_eff is a crude measure of effective sample size,\n"\
"and Rhat is the potential scale reduction factor on split chains (at \n"\
"convergence, Rhat=1)."
sampler = fit.sim['samples'][0]['args']['sampler_t']
date = fit.date.strftime('%c') # %c is locale's representation
footer = footer.format(sampler, date)
s = _summary(fit, pars, probs)
body = _array_to_table(s['summary'], s['summary_rownames'],
s['summary_colnames'], digits_summary)
return header + body + footer
def _print_stanfit(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2):
# warning added in PyStan 2.17.0
logger.warning('Function `_print_stanfit` is deprecated and will be removed in a future version. '\
'Use `stansummary` instead.', DeprecationWarning)
return stansummary(fit, pars=pars, probs=probs, digits_summary=digits_summary)
def _array_to_table(arr, rownames, colnames, n_digits):
"""Print an array with row and column names
Example:
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
beta[1,1] 0.0 0.0 1.0 -2.0 -0.7 0.0 0.7 2.0 4000 1
beta[1,2] 0.0 0.0 1.0 -2.1 -0.7 0.0 0.7 2.0 4000 1
beta[2,1] 0.0 0.0 1.0 -2.0 -0.7 0.0 0.7 2.0 4000 1
beta[2,2] 0.0 0.0 1.0 -1.9 -0.6 0.0 0.7 2.0 4000 1
lp__ -4.2 0.1 2.1 -9.4 -5.4 -3.8 -2.7 -1.2 317 1
"""
assert arr.shape == (len(rownames), len(colnames))
rownames_maxwidth = max(len(n) for n in rownames)
max_col_width = 7
min_col_width = 5
max_col_header_num_width = [max(max_col_width, max(len(n) + 1, min_col_width)) for n in colnames]
rows = []
for row in arr:
row_nums = []
for j, (num, width) in enumerate(zip(row, max_col_header_num_width)):
if colnames[j] == "n_eff":
num = int(round(num, 0)) if not np.isnan(num) else num
num = _format_number(num, n_digits, max_col_width - 1)
row_nums.append(num)
if len(num) + 1 > max_col_header_num_width[j]:
max_col_header_num_width[j] = len(num) + 1
rows.append(row_nums)
widths = [rownames_maxwidth] + max_col_header_num_width
header = '{:>{width}}'.format('', width=widths[0])
for name, width in zip(colnames, widths[1:]):
header += '{name:>{width}}'.format(name=name, width=width)
lines = [header]
for rowname, row in zip(rownames, rows):
line = '{name:{width}}'.format(name=rowname, width=widths[0])
for j, (num, width) in enumerate(zip(row, widths[1:])):
line += '{num:>{width}}'.format(num=num, width=width)
lines.append(line)
return '\n'.join(lines)
def _number_width(n):
"""Calculate the width in characters required to print a number
For example, -1024 takes 5 characters. -0.034 takes 6 characters.
"""
return len(str(n))
def _format_number_si(num, n_signif_figures):
"""Format a number using scientific notation to given significant figures"""
if math.isnan(num) or math.isinf(num):
return str(num)
leading, exp = '{:E}'.format(num).split('E')
leading = round(float(leading), n_signif_figures - 1)
exp = exp[:1] + exp[2:] if exp[1] == '0' else exp
formatted = '{}e{}'.format(leading, exp.lstrip('+'))
return formatted
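# illustrative behaviour of _format_number_si:
#   _format_number_si(123456, 3)   -> '1.23e5'
#   _format_number_si(0.000123, 2) -> '1.2e-4'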
def _format_number(num, n_signif_figures, max_width):
"""Format a number as a string while obeying space constraints.
`n_signif_figures` is the minimum number of significant figures expressed
`max_width` is the maximum width in characters allowed
"""
if max_width < 6:
raise NotImplementedError("Guaranteed formatting in fewer than 6 characters not supported.")
if math.isnan(num) or math.isinf(num):
return str(num)
# add 0.5 to prevent log(0) errors; only affects n_digits calculation for num > 0
n_digits = lambda num: math.floor(math.log10(abs(num) + 0.5)) + 1
if abs(num) > 10**-n_signif_figures and n_digits(num) <= max_width - n_signif_figures:
return str(round(num, n_signif_figures))[:max_width].rstrip('.')
elif _number_width(num) <= max_width:
if n_digits(num) >= n_signif_figures:
# the int() is necessary for consistency between Python 2 and 3
return str(int(round(num)))
else:
return str(num)
else:
return _format_number_si(num, n_signif_figures)
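# illustrative behaviour of _format_number:
#   _format_number(0.123456, 2, 7)  -> '0.12'   (fits, rounded to 2 significant figures)
#   _format_number(123456789, 2, 7) -> '1.2e8'  (too wide, falls back to scientific notation)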
def _summary(fit, pars=None, probs=None, **kwargs):
"""Summarize samples (compute mean, SD, quantiles) in all chains.
REF: stanfit-class.R summary method
Parameters
----------
fit : StanFit4Model object
pars : str or sequence of str, optional
Parameter names. By default use all parameters
probs : sequence of float, optional
Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975)
Returns
-------
summaries : OrderedDict of array
Array indexed by 'summary' has dimensions (num_params, num_statistics).
Parameters are unraveled in *row-major order*. Statistics include: mean,
se_mean, sd, probs_0, ..., probs_n, n_eff, and Rhat. Array indexed by
'c_summary' breaks down the statistics by chain and has dimensions
(num_params, num_statistics_c_summary, num_chains). Statistics for
`c_summary` are the same as for `summary` with the exception that
se_mean, n_eff, and Rhat are absent. Row names and column names are
also included in the OrderedDict.
"""
if fit.mode == 1:
msg = "Stan model {} is of mode 'test_grad'; sampling is not conducted."
msg = msg.format(fit.model_name)
raise ValueError(msg)
elif fit.mode == 2:
msg = "Stan model {} contains no samples.".format(fit.model_name)
raise ValueError(msg)
if fit.sim['n_save'] == fit.sim['warmup2']:
msg = "Stan model {} contains no samples.".format(fit.model_name)
raise ValueError(msg)
# rstan checks for cached summaries here
if pars is None:
pars = fit.sim['pars_oi']
elif isinstance(pars, string_types):
pars = [pars]
pars = _remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi'])
if probs is None:
probs = (0.025, 0.25, 0.5, 0.75, 0.975)
ss = _summary_sim(fit.sim, pars, probs)
# TODO: include sem, ess and rhat: ss['ess'], ss['rhat']
s1 = np.column_stack([ss['msd'][:, 0], ss['sem'], ss['msd'][:, 1], ss['quan'], ss['ess'], ss['rhat']])
s1_rownames = ss['c_msd_names']['parameters']
s1_colnames = ((ss['c_msd_names']['stats'][0],) + ('se_mean',) +
(ss['c_msd_names']['stats'][1],) + ss['c_quan_names']['stats'] +
('n_eff', 'Rhat'))
s2 = _combine_msd_quan(ss['c_msd'], ss['c_quan'])
s2_rownames = ss['c_msd_names']['parameters']
s2_colnames = ss['c_msd_names']['stats'] + ss['c_quan_names']['stats']
return OrderedDict(summary=s1, c_summary=s2,
summary_rownames=s1_rownames,
summary_colnames=s1_colnames,
c_summary_rownames=s2_rownames,
c_summary_colnames=s2_colnames)
def _combine_msd_quan(msd, quan):
"""Combine msd and quantiles in chain summary
Parameters
----------
msd : array of shape (num_params, 2, num_chains)
mean and sd for chains
cquan : array of shape (num_params, num_quan, num_chains)
quantiles for chains
Returns
-------
msdquan : array of shape (num_params, 2 + num_quan, num_chains)
"""
dim1 = msd.shape
n_par, _, n_chains = dim1
ll = []
for i in range(n_chains):
a1 = msd[:, :, i]
a2 = quan[:, :, i]
ll.append(np.column_stack([a1, a2]))
msdquan = np.dstack(ll)
return msdquan
def _summary_sim(sim, pars, probs):
"""Summarize chains together and separately
REF: rstan/rstan/R/misc.R
Parameters are unraveled in *column-major order*.
Parameters
----------
sim : dict
dict from from a stanfit fit object, i.e., fit['sim']
pars : Iterable of str
parameter names
probs : Iterable of probs
desired quantiles
Returns
-------
summaries : OrderedDict of array
This dictionary contains the following arrays indexed by the keys
given below:
- 'msd' : array of shape (num_params, 2) with mean and sd
- 'sem' : array of length num_params with standard error for the mean
- 'c_msd' : array of shape (num_params, 2, num_chains)
- 'quan' : array of shape (num_params, num_quan)
- 'c_quan' : array of shape (num_params, num_quan, num_chains)
- 'ess' : array of shape (num_params, 1)
- 'rhat' : array of shape (num_params, 1)
Note
----
`_summary_sim` has the parameters in *column-major* order whereas `_summary`
gives them in *row-major* order. (This follows RStan.)
"""
# NOTE: this follows RStan rather closely. Some of the calculations here mirror the R code in misc.R.
probs_len = len(probs)
n_chains = len(sim['samples'])
# tidx is a dict with keys that are parameters and values that are their
# indices using column-major ordering
tidx = _pars_total_indexes(sim['pars_oi'], sim['dims_oi'], sim['fnames_oi'], pars)
tidx_colm = [tidx[par] for par in pars]
tidx_colm = list(itertools.chain(*tidx_colm)) # like R's unlist()
tidx_rowm = [tidx[par+'_rowmajor'] for par in pars]
tidx_rowm = list(itertools.chain(*tidx_rowm))
tidx_len = len(tidx_colm)
lmsdq = [_get_par_summary(sim, i, probs) for i in tidx_colm]
msd = np.row_stack([x['msd'] for x in lmsdq])
quan = np.row_stack([x['quan'] for x in lmsdq])
probs_str = tuple(["{:g}%".format(100*p) for p in probs])
msd = msd.reshape(tidx_len, 2, order='F')
quan = quan.reshape(tidx_len, probs_len, order='F')
c_msd = np.row_stack([x['c_msd'] for x in lmsdq])
c_quan = np.row_stack([x['c_quan'] for x in lmsdq])
c_msd = c_msd.reshape(tidx_len, 2, n_chains, order='F')
c_quan = c_quan.reshape(tidx_len, probs_len, n_chains, order='F')
sim_attr_args = sim.get('args', None)
if sim_attr_args is None:
cids = list(range(n_chains))
else:
cids = [x['chain_id'] for x in sim_attr_args]
c_msd_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm],
stats=("mean", "sd"),
chains=tuple("chain:{}".format(cid) for cid in cids))
c_quan_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm],
stats=probs_str,
chains=tuple("chain:{}".format(cid) for cid in cids))
ess_and_rhat = np.array([pystan.chains.ess_and_splitrhat(sim, n) for n in tidx_colm])
ess, rhat = [arr.ravel() for arr in np.hsplit(ess_and_rhat, 2)]
return dict(msd=msd, c_msd=c_msd, c_msd_names=c_msd_names, quan=quan,
c_quan=c_quan, c_quan_names=c_quan_names,
sem=msd[:, 1] / np.sqrt(ess), ess=ess, rhat=rhat,
row_major_idx=tidx_rowm, col_major_idx=tidx_colm)
def _get_par_summary(sim, n, probs):
"""Summarize chains merged and individually
Parameters
----------
sim : dict from stanfit object
n : int
parameter index
probs : iterable of int
quantiles
Returns
-------
summary : dict
Dictionary containing summaries
"""
# _get_samples gets chains for nth parameter
ss = _get_samples(n, sim, inc_warmup=False)
msdfun = lambda chain: (np.mean(chain), np.std(chain, ddof=1))
qfun = lambda chain: mquantiles(chain, probs)
c_msd = np.array([msdfun(s) for s in ss]).flatten()
c_quan = np.array([qfun(s) for s in ss]).flatten()
ass = np.asarray(ss).flatten()
msd = np.asarray(msdfun(ass))
quan = qfun(np.asarray(ass))
return dict(msd=msd, quan=quan, c_msd=c_msd, c_quan=c_quan)
def _split_data(data):
data_r = {}
data_i = {}
# data_r and data_i are going to be converted into C++ objects of
# type: map<string, pair<vector<double>, vector<size_t>>> and
# map<string, pair<vector<int>, vector<size_t>>> so prepare
# them accordingly.
for k, v in data.items():
if np.issubdtype(np.asarray(v).dtype, np.integer):
data_i.update({k.encode('utf-8'): np.asarray(v, dtype=int)})
elif np.issubdtype(np.asarray(v).dtype, np.floating):
data_r.update({k.encode('utf-8'): np.asarray(v, dtype=float)})
else:
msg = "Variable {} is neither int nor float nor list/array thereof"
raise ValueError(msg.format(k))
return data_r, data_i
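# illustrative example of the split performed above:
#   _split_data({"N": 5, "y": [1.5, 2.5]})
#   -> data_r == {b"y": array([1.5, 2.5])} and data_i == {b"N": array(5)}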
def _config_argss(chains, iter, warmup, thin,
init, seed, sample_file, diagnostic_file, algorithm,
control, **kwargs):
# After rstan/rstan/R/misc.R (config_argss)
iter = int(iter)
if iter < 1:
raise ValueError("`iter` should be a positive integer.")
thin = int(thin)
if thin < 1 or thin > iter:
raise ValueError("`thin should be a positive integer "
"less than `iter`.")
warmup = max(0, int(warmup))
if warmup > iter:
raise ValueError("`warmup` should be an integer less than `iter`.")
chains = int(chains)
if chains < 1:
raise ValueError("`chains` should be a positive integer.")
iters = [iter] * chains
thins = [thin] * chains
warmups = [warmup] * chains
# use chain_id argument if specified
if kwargs.get('chain_id') is None:
chain_id = list(range(chains))
else:
chain_id = [int(id) for id in kwargs['chain_id']]
if len(set(chain_id)) != len(chain_id):
raise ValueError("`chain_id` has duplicated elements.")
chain_id_len = len(chain_id)
if chain_id_len >= chains:
chain_id = chain_id
else:
chain_id = chain_id + [max(chain_id) + 1 + i
for i in range(chains - chain_id_len)]
del kwargs['chain_id']
inits_specified = False
# slight difference here from rstan; Python's lists are not typed.
if isinstance(init, Number):
init = str(init)
if isinstance(init, string_types):
if init in ['0', 'random']:
inits = [init] * chains
else:
inits = ["random"] * chains
inits_specified = True
if not inits_specified and isinstance(init, Callable):
## test if function takes argument named "chain_id"
if "chain_id" in inspect.getargspec(init).args:
inits = [init(chain_id=id) for id in chain_id]
else:
inits = [init()] * chains
if not isinstance(inits[0], dict):
raise ValueError("The function specifying initial values must "
"return a dictionary.")
inits_specified = True
if not inits_specified and isinstance(init, Sequence):
if len(init) != chains:
raise ValueError("Length of list of initial values does not "
"match number of chains.")
if not all([isinstance(d, dict) for d in init]):
raise ValueError("Initial value list is not a sequence of "
"dictionaries.")
inits = init
inits_specified = True
if not inits_specified:
raise ValueError("Invalid specification of initial values.")
## only one seed is needed by virtue of the RNG
seed = _check_seed(seed)
kwargs['method'] = "test_grad" if kwargs.get('test_grad') else 'sampling'
all_control = {
"adapt_engaged", "adapt_gamma", "adapt_delta", "adapt_kappa",
"adapt_t0", "adapt_init_buffer", "adapt_term_buffer", "adapt_window",
"stepsize", "stepsize_jitter", "metric", "int_time",
"max_treedepth", "epsilon", "error", "inv_metric"
}
all_metrics = {"unit_e", "diag_e", "dense_e"}
if control is not None:
if not isinstance(control, dict):
raise ValueError("`control` must be a dictionary")
if not all(key in all_control for key in control):
unknown = set(control) - all_control
raise ValueError("`control` contains unknown parameters: {}".format(unknown))
if control.get('metric') and control['metric'] not in all_metrics:
raise ValueError("`metric` must be one of {}".format(all_metrics))
kwargs['control'] = control
argss = [dict() for _ in range(chains)]
for i in range(chains):
argss[i] = dict(chain_id=chain_id[i],
iter=iters[i], thin=thins[i], seed=seed,
warmup=warmups[i], init=inits[i],
algorithm=algorithm)
if sample_file is not None:
sample_file = _writable_sample_file(sample_file)
if chains == 1:
argss[0]['sample_file'] = sample_file
elif chains > 1:
for i in range(chains):
argss[i]['sample_file'] = _append_id(sample_file, i)
if diagnostic_file is not None:
raise NotImplementedError("diagnostic_file not implemented yet.")
if control is not None and "inv_metric" in control:
inv_metric = control.pop("inv_metric")
metric_dir = tempfile.mkdtemp()
if isinstance(inv_metric, dict):
for i in range(chains):
if i not in inv_metric:
msg = "Invalid value for init_inv_metric found (keys={}). " \
"Use either a dictionary with chain_index as keys (0,1,2,...)" \
"or ndarray."
msg = msg.format(list(inv_metric.keys()))
raise ValueError(msg)
mass_values = inv_metric[i]
metric_filename = "inv_metric_chain_{}.Rdata".format(str(i))
metric_path = os.path.join(metric_dir, metric_filename)
if isinstance(mass_values, str):
if not os.path.exists(mass_values):
raise ValueError("inverse metric file was not found: {}".format(mass_values))
shutil.copy(mass_values, metric_path)
else:
stan_rdump(dict(inv_metric=mass_values), metric_path)
argss[i]['metric_file'] = metric_path
elif isinstance(inv_metric, str):
if not os.path.exists(inv_metric):
raise ValueError("inverse metric file was not found: {}".format(inv_metric))
for i in range(chains):
metric_filename = "inv_metric_chain_{}.Rdata".format(str(i))
metric_path = os.path.join(metric_dir, metric_filename)
shutil.copy(inv_metric, metric_path)
argss[i]['metric_file'] = metric_path
elif isinstance(inv_metric, Iterable):
metric_filename = "inv_metric_chain_0.Rdata"
metric_path = os.path.join(metric_dir, metric_filename)
stan_rdump(dict(inv_metric=inv_metric), metric_path)
argss[0]['metric_file'] = metric_path
for i in range(1, chains):
metric_filename = "inv_metric_chain_{}.Rdata".format(str(i))
metric_path = os.path.join(metric_dir, metric_filename)
shutil.copy(argss[i-1]['metric_file'], metric_path)
argss[i]['metric_file'] = metric_path
else:
argss[i]['metric_file'] = ""
stepsize_list = None
if "control" in kwargs and "stepsize" in kwargs["control"]:
if isinstance(kwargs["control"]["stepsize"], Sequence):
stepsize_list = kwargs["control"]["stepsize"]
if len(kwargs["control"]["stepsize"]) == 1:
kwargs["control"]["stepsize"] = kwargs["control"]["stepsize"][0]
elif len(kwargs["control"]["stepsize"]) != chains:
raise ValueError("stepsize length needs to equal chain count.")
else:
stepsize_list = kwargs["control"]["stepsize"]
for i in range(chains):
argss[i].update(kwargs)
if stepsize_list is not None:
argss[i]["control"]["stepsize"] = stepsize_list[i]
argss[i] = _get_valid_stan_args(argss[i])
return argss
def _get_valid_stan_args(base_args=None):
"""Fill in default values for arguments not provided in `base_args`.
RStan does this in C++ in stan_args.hpp in the stan_args constructor.
It seems easier to deal with here in Python.
"""
args = base_args.copy() if base_args is not None else {}
# Default arguments, c.f. rstan/rstan/inst/include/rstan/stan_args.hpp
# values in args are going to be converted into C++ objects so
# prepare them accordingly---e.g., unicode -> bytes -> std::string
args['chain_id'] = args.get('chain_id', 1)
args['append_samples'] = args.get('append_samples', False)
if args.get('method') is None or args['method'] == "sampling":
args['method'] = stan_args_method_t.SAMPLING
elif args['method'] == "optim":
args['method'] = stan_args_method_t.OPTIM
elif args['method'] == 'test_grad':
args['method'] = stan_args_method_t.TEST_GRADIENT
elif args['method'] == 'variational':
args['method'] = stan_args_method_t.VARIATIONAL
else:
args['method'] = stan_args_method_t.SAMPLING
args['sample_file_flag'] = True if args.get('sample_file') else False
args['sample_file'] = args.get('sample_file', '').encode('ascii')
args['diagnostic_file_flag'] = True if args.get('diagnostic_file') else False
args['diagnostic_file'] = args.get('diagnostic_file', '').encode('ascii')
# NB: argument named "seed" not "random_seed"
args['random_seed'] = args.get('seed', int(time.time()))
args['metric_file_flag'] = True if args.get('metric_file') else False
args['metric_file'] = args.get('metric_file', '').encode('ascii')
if args['method'] == stan_args_method_t.VARIATIONAL:
# variational does not use a `control` map like sampling
args['ctrl'] = args.get('ctrl', dict(variational=dict()))
args['ctrl']['variational']['iter'] = args.get('iter', 10000)
args['ctrl']['variational']['grad_samples'] = args.get('grad_samples', 1)
args['ctrl']['variational']['elbo_samples'] = args.get('elbo_samples', 100)
args['ctrl']['variational']['eval_elbo'] = args.get('eval_elbo', 100)
args['ctrl']['variational']['output_samples'] = args.get('output_samples', 1000)
args['ctrl']['variational']['adapt_iter'] = args.get('adapt_iter', 50)
args['ctrl']['variational']['eta'] = args.get('eta', 1.0)
args['ctrl']['variational']['adapt_engaged'] = args.get('adapt_engaged', True)
args['ctrl']['variational']['tol_rel_obj'] = args.get('tol_rel_obj', 0.01)
if args.get('algorithm', '').lower() == 'fullrank':
args['ctrl']['variational']['algorithm'] = variational_algo_t.FULLRANK
else:
args['ctrl']['variational']['algorithm'] = variational_algo_t.MEANFIELD
elif args['method'] == stan_args_method_t.SAMPLING:
args['ctrl'] = args.get('ctrl', dict(sampling=dict()))
args['ctrl']['sampling']['iter'] = iter = args.get('iter', 2000)
args['ctrl']['sampling']['warmup'] = warmup = args.get('warmup', iter // 2)
calculated_thin = iter - warmup // 1000
if calculated_thin < 1:
calculated_thin = 1
args['ctrl']['sampling']['thin'] = thin = args.get('thin', calculated_thin)
args['ctrl']['sampling']['save_warmup'] = True # always True now
args['ctrl']['sampling']['iter_save_wo_warmup'] = iter_save_wo_warmup = 1 + (iter - warmup - 1) // thin
args['ctrl']['sampling']['iter_save'] = iter_save_wo_warmup + 1 + (warmup - 1) // thin
refresh = iter // 10 if iter >= 20 else 1
args['ctrl']['sampling']['refresh'] = args.get('refresh', refresh)
ctrl_lst = args.get('control', dict())
ctrl_sampling = args['ctrl']['sampling']
# NB: if these defaults change, remember to update docstrings
ctrl_sampling['adapt_engaged'] = ctrl_lst.get("adapt_engaged", True)
ctrl_sampling['adapt_gamma'] = ctrl_lst.get("adapt_gamma", 0.05)
ctrl_sampling['adapt_delta'] = ctrl_lst.get("adapt_delta", 0.8)
ctrl_sampling['adapt_kappa'] = ctrl_lst.get("adapt_kappa", 0.75)
ctrl_sampling['adapt_t0'] = ctrl_lst.get("adapt_t0", 10.0)
ctrl_sampling['adapt_init_buffer'] = ctrl_lst.get("adapt_init_buffer", 75)
ctrl_sampling['adapt_term_buffer'] = ctrl_lst.get("adapt_term_buffer", 50)
ctrl_sampling['adapt_window'] = ctrl_lst.get("adapt_window", 25)
ctrl_sampling['stepsize'] = ctrl_lst.get("stepsize", 1.0)
ctrl_sampling['stepsize_jitter'] = ctrl_lst.get("stepsize_jitter", 0.0)
algorithm = args.get('algorithm', 'NUTS')
if algorithm == 'HMC':
args['ctrl']['sampling']['algorithm'] = sampling_algo_t.HMC
elif algorithm == 'Metropolis':
args['ctrl']['sampling']['algorithm'] = sampling_algo_t.Metropolis
elif algorithm == 'NUTS':
args['ctrl']['sampling']['algorithm'] = sampling_algo_t.NUTS
elif algorithm == 'Fixed_param':
args['ctrl']['sampling']['algorithm'] = sampling_algo_t.Fixed_param
# TODO: Setting adapt_engaged to False solves the segfault reported
# in issue #200; find out why this hack is needed. RStan deals with
# the setting elsewhere.
ctrl_sampling['adapt_engaged'] = False
else:
msg = "Invalid value for parameter algorithm (found {}; " \
"require HMC, Metropolis, NUTS, or Fixed_param).".format(algorithm)
raise ValueError(msg)
metric = ctrl_lst.get('metric', 'diag_e')
if metric == "unit_e":
ctrl_sampling['metric'] = sampling_metric_t.UNIT_E
elif metric == "diag_e":
ctrl_sampling['metric'] = sampling_metric_t.DIAG_E
elif metric == "dense_e":
ctrl_sampling['metric'] = sampling_metric_t.DENSE_E
if ctrl_sampling['algorithm'] == sampling_algo_t.NUTS:
ctrl_sampling['max_treedepth'] = ctrl_lst.get("max_treedepth", 10)
elif ctrl_sampling['algorithm'] == sampling_algo_t.HMC:
ctrl_sampling['int_time'] = ctrl_lst.get('int_time', 6.283185307179586476925286766559005768e+00)
elif ctrl_sampling['algorithm'] == sampling_algo_t.Metropolis:
pass
elif ctrl_sampling['algorithm'] == sampling_algo_t.Fixed_param:
pass
elif args['method'] == stan_args_method_t.OPTIM:
args['ctrl'] = args.get('ctrl', dict(optim=dict()))
args['ctrl']['optim']['iter'] = iter = args.get('iter', 2000)
algorithm = args.get('algorithm', 'LBFGS')
if algorithm == "BFGS":
args['ctrl']['optim']['algorithm'] = optim_algo_t.BFGS
elif algorithm == "Newton":
args['ctrl']['optim']['algorithm'] = optim_algo_t.Newton
elif algorithm == "LBFGS":
args['ctrl']['optim']['algorithm'] = optim_algo_t.LBFGS
else:
msg = "Invalid value for parameter algorithm (found {}; " \
"require (L)BFGS or Newton).".format(algorithm)
raise ValueError(msg)
refresh = args['ctrl']['optim']['iter'] // 100
args['ctrl']['optim']['refresh'] = args.get('refresh', refresh)
if args['ctrl']['optim']['refresh'] < 1:
args['ctrl']['optim']['refresh'] = 1
args['ctrl']['optim']['init_alpha'] = args.get("init_alpha", 0.001)
args['ctrl']['optim']['tol_obj'] = args.get("tol_obj", 1e-12)
args['ctrl']['optim']['tol_grad'] = args.get("tol_grad", 1e-8)
args['ctrl']['optim']['tol_param'] = args.get("tol_param", 1e-8)
args['ctrl']['optim']['tol_rel_obj'] = args.get("tol_rel_obj", 1e4)
args['ctrl']['optim']['tol_rel_grad'] = args.get("tol_rel_grad", 1e7)
args['ctrl']['optim']['save_iterations'] = args.get("save_iterations", True)
args['ctrl']['optim']['history_size'] = args.get("history_size", 5)
elif args['method'] == stan_args_method_t.TEST_GRADIENT:
args['ctrl'] = args.get('ctrl', dict(test_grad=dict()))
args['ctrl']['test_grad']['epsilon'] = args.get("epsilon", 1e-6)
args['ctrl']['test_grad']['error'] = args.get("error", 1e-6)
init = args.get('init', "random")
if isinstance(init, string_types):
args['init'] = init.encode('ascii')
elif isinstance(init, dict):
args['init'] = "user".encode('ascii')
# while the name is 'init_list', it is a dict; the name comes from rstan,
# where list elements can have names
args['init_list'] = init
else:
args['init'] = "random".encode('ascii')
args['init_radius'] = args.get('init_r', 2.0)
if (args['init_radius'] <= 0):
args['init'] = b"0"
# 0 initialization requires init_radius = 0
if (args['init'] == b"0" or args['init'] == 0):
args['init_radius'] = 0.0
args['enable_random_init'] = args.get('enable_random_init', True)
# RStan calls validate_args() here
return args
def _check_seed(seed):
"""If possible, convert `seed` into a valid form for Stan (an integer
between 0 and MAX_UINT, inclusive). If not possible, use a random seed
instead and raise a warning if `seed` was not provided as `None`.
"""
if isinstance(seed, (Number, string_types)):
try:
seed = int(seed)
except ValueError:
logger.warning("`seed` must be castable to an integer")
seed = None
else:
if seed < 0:
logger.warning("`seed` may not be negative")
seed = None
elif seed > MAX_UINT:
raise ValueError('`seed` is too large; max is {}'.format(MAX_UINT))
elif isinstance(seed, np.random.RandomState):
seed = seed.randint(0, MAX_UINT)
elif seed is not None:
logger.warning('`seed` has unexpected type')
seed = None
if seed is None:
seed = random.randint(0, MAX_UINT)
return seed
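# Illustrative usage sketch (not part of the original module): how `_check_seed`
# normalizes a few typical inputs. Assumes the module-level names used above
# (MAX_UINT, logger, random, numpy as np) are available, as elsewhere in this file.
def _example_check_seed():
    # A numeric string is cast to an int and returned unchanged.
    seed_from_str = _check_seed("42")            # -> 42
    # A value that cannot be cast falls back to a random seed (with a warning).
    seed_from_bad = _check_seed("not-a-seed")    # -> random int in [0, MAX_UINT]
    # A numpy RandomState is sampled to obtain an integer seed.
    seed_from_rs = _check_seed(np.random.RandomState(7))
    return seed_from_str, seed_from_bad, seed_from_rs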
def _organize_inits(inits, pars, dims):
"""Obtain a list of initial values for each chain.
The parameter 'lp__' will be removed from the chains.
Parameters
----------
inits : list
list of initial values for each chain.
pars : list of str
dims : list of list of int
from (via cython conversion) vector[vector[uint]] dims
Returns
-------
inits : list of dict
"""
try:
idx_of_lp = pars.index('lp__')
del pars[idx_of_lp]
del dims[idx_of_lp]
except ValueError:
pass
starts = _calc_starts(dims)
return [_par_vector2dict(init, pars, dims, starts) for init in inits]
def _calc_starts(dims):
"""Calculate starting indexes
Parameters
----------
dims : list of list of int
from (via cython conversion) vector[vector[uint]] dims
Examples
--------
>>> _calc_starts([[8, 2], [5], [6, 2]])
[0, 16, 21]
"""
# NB: Python uses 0-indexing; R uses 1-indexing.
l = len(dims)
s = [np.prod(d) for d in dims]
starts = np.cumsum([0] + s)[0:l].tolist()
# coerce things into ints before returning
return [int(i) for i in starts]
def _par_vector2dict(v, pars, dims, starts=None):
"""Turn a vector of samples into an OrderedDict according to param dims.
Parameters
----------
v : list of int or float
pars : list of str
parameter names
dims : list of list of int
list of dimensions of parameters
Returns
-------
d : dict
Examples
--------
>>> v = list(range(31))
>>> dims = [[5], [5, 5], []]
>>> pars = ['mu', 'Phi', 'eta']
>>> _par_vector2dict(v, pars, dims) # doctest: +ELLIPSIS
OrderedDict([('mu', array([0, 1, 2, 3, 4])), ('Phi', array([[ 5, ...
"""
if starts is None:
starts = _calc_starts(dims)
d = OrderedDict()
for i in range(len(pars)):
l = int(np.prod(dims[i]))
start = starts[i]
end = start + l
y = np.asarray(v[start:end])
if len(dims[i]) > 1:
y = y.reshape(dims[i], order='F') # 'F' = Fortran, column-major
d[pars[i]] = y.squeeze() if y.shape == (1,) else y
return d
def _check_pars(allpars, pars):
if len(pars) == 0:
raise ValueError("No parameter specified (`pars` is empty).")
for par in pars:
if par not in allpars:
raise ValueError("No parameter {}".format(par))
def _pars_total_indexes(names, dims, fnames, pars):
"""Obtain all the indexes for parameters `pars` in the sequence of names.
`names` references variables that are in column-major order
Parameters
----------
names : sequence of str
All the parameter names.
dims : sequence of list of int
Dimensions, in same order as `names`.
fnames : sequence of str
All the scalar parameter names
pars : sequence of str
The parameters of interest. It is assumed all elements in `pars` are in
`names`.
Returns
-------
indexes : OrderedDict of list of int
Dictionary uses parameter names as keys. Indexes are column-major order.
For each parameter there is also a key `par`+'_rowmajor' that stores the
row-major indexing.
Note
----
Inside each parameter (vector or array), the sequence uses column-major
ordering. For example, if we have parameters alpha and beta, having
dimensions [2, 2] and [2, 3] respectively, the whole parameter sequence
is alpha[0,0], alpha[1,0], alpha[0, 1], alpha[1, 1], beta[0, 0],
beta[1, 0], beta[0, 1], beta[1, 1], beta[0, 2], beta[1, 2]. In short,
column-major, as R's matrix() fills by default (byrow=FALSE).
Example
-------
>>> pars_oi = ['mu', 'tau', 'eta', 'theta', 'lp__']
>>> dims_oi = [[], [], [8], [8], []]
>>> fnames_oi = ['mu', 'tau', 'eta[1]', 'eta[2]', 'eta[3]', 'eta[4]',
... 'eta[5]', 'eta[6]', 'eta[7]', 'eta[8]', 'theta[1]', 'theta[2]',
... 'theta[3]', 'theta[4]', 'theta[5]', 'theta[6]', 'theta[7]',
... 'theta[8]', 'lp__']
>>> pars = ['mu', 'tau', 'eta', 'theta', 'lp__']
>>> _pars_total_indexes(pars_oi, dims_oi, fnames_oi, pars)
... # doctest: +ELLIPSIS
OrderedDict([('mu', (0,)), ('tau', (1,)), ('eta', (2, 3, ...
"""
starts = _calc_starts(dims)
def par_total_indexes(par):
# if `par` is a scalar, it will match one of `fnames`
if par in fnames:
p = fnames.index(par)
idx = tuple([p])
return OrderedDict([(par, idx), (par+'_rowmajor', idx)])
else:
p = names.index(par)
idx = starts[p] + np.arange(np.prod(dims[p]))
idx_rowmajor = starts[p] + _idx_col2rowm(dims[p])
return OrderedDict([(par, tuple(idx)), (par+'_rowmajor', tuple(idx_rowmajor))])
indexes = OrderedDict()
for par in pars:
indexes.update(par_total_indexes(par))
return indexes
def _idx_col2rowm(d):
"""Generate indexes to change from col-major to row-major ordering"""
if 0 == len(d):
return 1
if 1 == len(d):
return np.arange(d[0])
# order='F' indicates column-major ordering
idx = np.array(np.arange(np.prod(d))).reshape(d, order='F').T
return idx.flatten(order='F')
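# Illustrative sketch (not part of the original module): `_idx_col2rowm` maps
# column-major storage positions to row-major traversal order. For a 2x3
# parameter, column-major storage holds (0,0), (1,0), (0,1), (1,1), (0,2), (1,2),
# so reading it row by row visits positions 0, 2, 4, 1, 3, 5.
def _example_idx_col2rowm():
    idx = _idx_col2rowm([2, 3])
    assert list(idx) == [0, 2, 4, 1, 3, 5]
    return idx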
def _get_kept_samples(n, sim):
"""Get samples to be kept from the chain(s) for `n`th parameter.
Samples from different chains are merged.
Parameters
----------
n : int
sim : dict
A dictionary tied to a StanFit4Model instance.
Returns
-------
samples : array
Samples being kept, permuted and in column-major order.
"""
return pystan._misc.get_kept_samples(n, sim)
def _get_samples(n, sim, inc_warmup=True):
# NOTE: this is in stanfit-class.R in RStan (rather than misc.R)
"""Get chains for `n`th parameter.
Parameters
----------
n : int
sim : dict
A dictionary tied to a StanFit4Model instance.
Returns
-------
chains : list of array
Each chain is an element in the list.
"""
return pystan._misc.get_samples(n, sim, inc_warmup)
def _redirect_stderr():
"""Redirect stderr for subprocesses to /dev/null
Silences copious compilation messages.
Returns
-------
orig_stderr : file descriptor
Copy of original stderr file descriptor
"""
sys.stderr.flush()
stderr_fileno = sys.stderr.fileno()
orig_stderr = os.dup(stderr_fileno)
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, stderr_fileno)
os.close(devnull)
return orig_stderr
def _has_fileno(stream):
"""Returns whether the stream object seems to have a working fileno()
Tells whether _redirect_stderr is likely to work.
Parameters
----------
stream : IO stream object
Returns
-------
has_fileno : bool
True if stream.fileno() exists and doesn't raise OSError or
UnsupportedOperation
"""
try:
stream.fileno()
except (AttributeError, OSError, IOError, io.UnsupportedOperation):
return False
return True
def _append_id(file, id, suffix='.csv'):
fname = os.path.basename(file)
fpath = os.path.dirname(file)
fname2 = re.sub(r'\.csv\s*$', '_{}.csv'.format(id), fname)
if fname2 == fname:
fname2 = '{}_{}.csv'.format(fname, id)
return os.path.join(fpath, fname2)
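# Illustrative sketch (not part of the original module): `_append_id` inserts a
# chain id before the `.csv` suffix, or appends one when the suffix is missing.
def _example_append_id():
    assert _append_id("out/samples.csv", 2) == os.path.join("out", "samples_2.csv")
    assert _append_id("out/samples", 2) == os.path.join("out", "samples_2.csv")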
def _writable_sample_file(file, warn=True, wfun=None):
"""Check to see if file is writable, if not use temporary file"""
if wfun is None:
wfun = lambda x, y: '"{}" is not writable; use "{}" instead'.format(x, y)
dir = os.path.dirname(file)
dir = os.getcwd() if dir == '' else dir
if os.access(dir, os.W_OK):
return file
else:
dir2 = tempfile.mkdtemp()
if warn:
logger.warning(wfun(dir, dir2))
return os.path.join(dir2, os.path.basename(file))
def is_legal_stan_vname(name):
stan_kw1 = ('for', 'in', 'while', 'repeat', 'until', 'if', 'then', 'else',
'true', 'false')
stan_kw2 = ('int', 'real', 'vector', 'simplex', 'ordered', 'positive_ordered',
'row_vector', 'matrix', 'corr_matrix', 'cov_matrix', 'lower', 'upper')
stan_kw3 = ('model', 'data', 'parameters', 'quantities', 'transformed', 'generated')
cpp_kw = ("alignas", "alignof", "and", "and_eq", "asm", "auto", "bitand", "bitor", "bool",
"break", "case", "catch", "char", "char16_t", "char32_t", "class", "compl",
"const", "constexpr", "const_cast", "continue", "decltype", "default", "delete",
"do", "double", "dynamic_cast", "else", "enum", "explicit", "export", "extern",
"false", "float", "for", "friend", "goto", "if", "inline", "int", "long", "mutable",
"namespace", "new", "noexcept", "not", "not_eq", "nullptr", "operator", "or", "or_eq",
"private", "protected", "public", "register", "reinterpret_cast", "return",
"short", "signed", "sizeof", "static", "static_assert", "static_cast", "struct",
"switch", "template", "this", "thread_local", "throw", "true", "try", "typedef",
"typeid", "typename", "union", "unsigned", "using", "virtual", "void", "volatile",
"wchar_t", "while", "xor", "xor_eq")
illegal = stan_kw1 + stan_kw2 + stan_kw3 + cpp_kw
if re.findall(r'(\.|^[0-9]|__$)', name):
return False
return name not in illegal
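# Illustrative sketch (not part of the original module): names that clash with
# Stan or C++ keywords, contain a dot, start with a digit, or end with a double
# underscore are rejected.
def _example_is_legal_stan_vname():
    assert is_legal_stan_vname("theta")
    assert not is_legal_stan_vname("lp__")    # trailing double underscore
    assert not is_legal_stan_vname("2theta")  # leading digit
    assert not is_legal_stan_vname("model")   # Stan block keyword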
def _dict_to_rdump(data):
parts = []
for name, value in data.items():
if isinstance(value, (Sequence, Number, np.number, np.ndarray, int, bool, float)) \
and not isinstance(value, string_types):
value = np.asarray(value)
else:
raise ValueError("Variable {} is not a number and cannot be dumped.".format(name))
if value.dtype == np.bool_:  # np.bool is deprecated as an alias of the builtin bool
value = value.astype(int)
if value.ndim == 0:
s = '{} <- {}\n'.format(name, str(value))
elif value.ndim == 1:
s = '{} <-\nc({})\n'.format(name, ', '.join(str(v) for v in value))
elif value.ndim > 1:
tmpl = '{} <-\nstructure(c({}), .Dim = c({}))\n'
# transpose value as R uses column-major
# 'F' = Fortran, column-major
s = tmpl.format(name,
', '.join(str(v) for v in value.flatten(order='F')),
', '.join(str(v) for v in value.shape))
parts.append(s)
return ''.join(parts)
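# Illustrative sketch (not part of the original module): scalars, vectors, and
# arrays are rendered in R dump syntax; arrays are flattened column-major and
# carry a .Dim attribute. Assumes the module-level numpy import, as above.
def _example_dict_to_rdump():
    data = {"N": 3, "y": np.array([1.5, 2.5, 3.5]), "X": np.arange(4).reshape(2, 2)}
    text = _dict_to_rdump(data)
    # Produces lines such as:
    #   N <- 3
    #   y <-
    #   c(1.5, 2.5, 3.5)
    #   X <-
    #   structure(c(0, 2, 1, 3), .Dim = c(2, 2))
    return text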
def stan_rdump(data, filename):
"""
Dump a dictionary with model data into a file using the R dump format that
Stan supports.
Parameters
----------
data : dict
filename : str
"""
for name in data:
if not is_legal_stan_vname(name):
raise ValueError("Variable name {} is not allowed in Stan".format(name))
with open(filename, 'w') as f:
f.write(_dict_to_rdump(data))
def _rdump_value_to_numpy(s):
"""
Convert a R dump formatted value to Numpy equivalent
For example, "c(1, 2)" becomes ``array([1, 2])``
Only supports a few R data structures. Will not work with European decimal format.
"""
if "structure" in s:
vector_str, shape_str = re.findall(r'c\([^\)]+\)', s)
shape = [int(d) for d in shape_str[2:-1].split(',')]
if '.' in vector_str:
arr = np.array([float(v) for v in vector_str[2:-1].split(',')])
else:
arr = np.array([int(v) for v in vector_str[2:-1].split(',')])
# 'F' = Fortran, column-major
arr = arr.reshape(shape, order='F')
elif "c(" in s:
if '.' in s:
arr = np.array([float(v) for v in s[2:-1].split(',')], order='F')
else:
arr = np.array([int(v) for v in s[2:-1].split(',')], order='F')
else:
arr = np.array(float(s) if '.' in s else int(s))
return arr
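# Illustrative sketch (not part of the original module): the three value shapes
# the parser understands -- scalars, c(...) vectors, and structure(...) arrays.
def _example_rdump_value_to_numpy():
    scalar = _rdump_value_to_numpy("3")           # -> array(3)
    vector = _rdump_value_to_numpy("c(1, 2, 3)")  # -> array([1, 2, 3])
    matrix = _rdump_value_to_numpy("structure(c(0, 2, 1, 3), .Dim = c(2, 2))")
    assert matrix.shape == (2, 2)
    return scalar, vector, matrix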
def _remove_empty_pars(pars, pars_oi, dims_oi):
"""
Remove parameters that are actually empty. For example, the parameter
y would be removed with the following model code:
transformed data { int n; n <- 0; }
parameters { real y[n]; }
Parameters
----------
pars: iterable of str
pars_oi: list of str
dims_oi: list of list of int
Returns
-------
pars_trimmed: list of str
"""
pars = list(pars)
for par, dim in zip(pars_oi, dims_oi):
if par in pars and np.prod(dim) == 0:
del pars[pars.index(par)]
return pars
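# Illustrative sketch (not part of the original module): parameters whose
# declared dimensions multiply to zero are dropped from the requested list.
def _example_remove_empty_pars():
    pars_oi = ["mu", "y"]
    dims_oi = [[3], [0]]  # `y` was declared with size zero
    assert _remove_empty_pars(["mu", "y"], pars_oi, dims_oi) == ["mu"]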
def read_rdump(filename):
"""
Read data formatted using the R dump format
Parameters
----------
filename: str
Returns
-------
data : OrderedDict
"""
with open(filename) as f:
    contents = f.read().strip()
names = [name.strip() for name in re.findall(r'^(\w+) <-', contents, re.MULTILINE)]
values = [value.strip() for value in re.split(r'\w+ +<-', contents) if value]
if len(values) != len(names):
raise ValueError("Unable to read file. Unable to pair variable name with value.")
d = OrderedDict()
for name, value in zip(names, values):
d[name.strip()] = _rdump_value_to_numpy(value.strip())
return d
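# Illustrative sketch (not part of the original module): writing a data dict with
# `stan_rdump` and reading it back with `read_rdump` recovers the values. Uses a
# throwaway temporary directory; assumes the module-level imports (os, tempfile,
# numpy as np), as above.
def _example_rdump_roundtrip():
    data = {"N": 3, "y": np.array([1.0, 2.0, 3.0])}
    path = os.path.join(tempfile.mkdtemp(), "data.R")
    stan_rdump(data, path)
    recovered = read_rdump(path)
    assert recovered["N"] == 3
    assert np.allclose(recovered["y"], data["y"])
    return recovered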
def to_dataframe(fit, pars=None, permuted=False, dtypes=None, inc_warmup=False, diagnostics=True, header=True):
"""Extract samples as a pandas dataframe for different parameters.
Parameters
----------
pars : {str, sequence of str}
parameter (or quantile) name(s).
permuted : bool
If True, returned samples are permuted.
If inc_warmup is True, warmup samples have negative order.
dtypes : dict
datatype of parameter(s).
If nothing is passed, float will be used for all parameters.
inc_warmup : bool
If True, warmup samples are kept; otherwise they are
discarded.
diagnostics : bool
If True, include hmc diagnostics in dataframe.
header : bool
If True, include header columns.
Returns
-------
df : pandas dataframe
Returned dataframe contains: [header_df]|[draws_df]|[diagnostics_df],
where all groups are optional.
To exclude draws_df use `pars=[]`.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("Pandas module not found. You can install pandas with: pip install pandas")
fit._verify_has_samples()
pars_original = pars
if pars is None:
pars = fit.sim['pars_oi']
elif isinstance(pars, string_types):
pars = [pars]
if pars:
pars = pystan.misc._remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi'])
allpars = fit.sim['pars_oi'] + fit.sim['fnames_oi']
_check_pars(allpars, pars)
if dtypes is None:
dtypes = {}
n_kept = [s if inc_warmup else s-w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
chains = len(fit.sim['samples'])
diagnostic_type = {'divergent__':int,
'energy__':float,
'treedepth__':int,
'accept_stat__':float,
'stepsize__':float,
'n_leapfrog__':int}
header_dict = OrderedDict()
if header:
idx = np.concatenate([np.full(n_kept[chain], chain, dtype=int) for chain in range(chains)])
warmup = [np.zeros(n_kept[chain], dtype=np.int64) for chain in range(chains)]
if inc_warmup:
draw = []
for chain, w in zip(range(chains), fit.sim['warmup2']):
warmup[chain][:w] = 1
draw.append(np.arange(n_kept[chain], dtype=np.int64) - w)
draw = np.concatenate(draw)
else:
draw = np.concatenate([np.arange(n_kept[chain], dtype=np.int64) for chain in range(chains)])
warmup = np.concatenate(warmup)
header_dict = OrderedDict(zip(['chain', 'draw', 'warmup'], [idx, draw, warmup]))
if permuted:
if inc_warmup:
chain_permutation = []
chain_permutation_order = []
permutation = []
permutation_order = []
for chain, p, w in zip(range(chains), fit.sim['permutation'], fit.sim['warmup2']):
chain_permutation.append(list(range(-w, 0)) + p)
chain_permutation_order.append(list(range(-w, 0)) + list(np.argsort(p)))
permutation.append(sum(n_kept[:chain])+chain_permutation[-1]+w)
permutation_order.append(sum(n_kept[:chain])+chain_permutation_order[-1]+w)
chain_permutation = np.concatenate(chain_permutation)
chain_permutation_order = np.concatenate(chain_permutation_order)
permutation = np.concatenate(permutation)
permutation_order = np.concatenate(permutation_order)
else:
chain_permutation = np.concatenate(fit.sim['permutation'])
chain_permutation_order = np.concatenate([np.argsort(item) for item in fit.sim['permutation']])
permutation = np.concatenate([sum(n_kept[:chain])+p for chain, p in enumerate(fit.sim['permutation'])])
permutation_order = np.argsort(permutation)
header_dict["permutation"] = permutation
header_dict["chain_permutation"] = chain_permutation
header_dict["permutation_order"] = permutation_order
header_dict["chain_permutation_order"] = chain_permutation_order
if header:
header_df = pd.DataFrame.from_dict(header_dict)
else:
if permuted:
header_df = pd.DataFrame.from_dict({"permutation_order" : header_dict["permutation_order"]})
else:
header_df = | pd.DataFrame() | pandas.DataFrame |
from urllib.parse import urlparse
import pytest
import pandas as pd
import numpy as np
from visions.core.implementations.types import *
from visions.application.summaries.summary import CompleteSummary
@pytest.fixture(scope="class")
def summary():
return CompleteSummary()
def validate_summary_output(test_series, visions_type, correct_output, summary):
trial_output = summary.summarize_series(test_series, visions_type)
for metric, result in correct_output.items():
assert metric in trial_output, "Metric `{metric}` is missing".format(
metric=metric
)
assert (
trial_output[metric] == result
), "Expected value {result} for metric `{metric}`, got {output}".format(
result=result, metric=metric, output=trial_output[metric]
)
def test_integer_summary(summary, visions_type=visions_integer):
test_series = | pd.Series([0, 1, 2, 3, 4]) | pandas.Series |
import pandas as pd
import geopandas as gpd
import numpy as np
from rasterstats import zonal_stats
import bisect
import requests
import tempfile
import io
from io import BytesIO
from . import parameters as pr
class Power():
def __init__(self,EUSES, **kwargs):
ds = EUSES.ds
year = EUSES.year
time_range = ds.coords['time']
load_excel = pd.read_excel('https://eepublicdownloads.blob.core.windows.net/public-cdn-container/clean-documents/Publications/Statistics/Monthly-hourly-load-values_2006-2015.xlsx', header=3)
ds['power'] = (('nuts_2','time'),(np.array([[t*0.0 for t in range(len(time_range))]]*len(ds.coords['nuts_2']))))
ds['power'].attrs['unit'] = 'MW'
def entsoe_hourly(id,year):
if id == 'UK':
id = 'GB'
n_rows = int(len(time_range) / 24)
load_ger_range = load_excel.query('Year == {} & Country == "{}"'.format(year,id))
load_sep = load_ger_range.drop(['Country','Year','Month','Day','Coverage ratio'], axis=1)[0:n_rows]
load_profile = pd.DataFrame()
for i, row in load_sep.iterrows():
load_profile = pd.concat([load_profile,row])
load_profile.columns = ['load_in_MW']
load_profile.index = time_range.values
return load_profile.fillna(load_profile.load_in_MW.mean())
for c in EUSES.countries:
id = pr.get_metadata(c,'renewables_nj_id')
ds_c = EUSES.filter_countries([c]).ds
# ds_c = ds.where(ds['country_code'] == id, drop = True)
load_profile = entsoe_hourly(id,year)
population_sum = ds_c['population'].sum().item()
for nuts_2_id in ds_c.coords['nuts_2']:
power_profile = [round(ds_c['population'].loc[nuts_2_id].values.item()/population_sum * int(x),3) for x in load_profile.load_in_MW]
ds['power'].loc[nuts_2_id] = power_profile
class Heat():
def __init__(self,EUSES, decentralized=False, **kwargs):
temp = tempfile.TemporaryDirectory()
ds = EUSES.ds
year = EUSES.year
time_range = ds.coords['time']
r = requests.get('https://gitlab.com/hotmaps/heat/heat_tot_curr_density/-/raw/master/data/heat_tot_curr_density.tif')
hd_path = temp.name+'/heat_tot_curr_density.tif'
open(hd_path, 'wb').write(r.content)
r = requests.get('https://gitlab.com/hotmaps/space_heating_cooling_dhw_demand/-/raw/master/data/space_heating_cooling_dhw_top-down.csv')
hotmaps_volumes = pd.read_csv(io.StringIO(r.content.decode('utf-8')), sep=r"|")
def heating_volumes():
sectors = ['residential','service']
end_uses = ['space_heating','hot_water']
for sector, end_use in dict(zip(sectors,[end_uses,end_uses])).items():
for eu in end_use:
ds[sector+'_'+eu] = (('nuts_2',),(np.array([0.0]*len(ds.coords['nuts_2']))))
for c in EUSES.countries:
id = pr.get_metadata(c,'nuts_id')
hotmaps_id = pr.get_metadata(c,'nuts_id')
similar_countries = {'HR':["AL", "MK", "ME"], 'LU':["CH"], 'SE': ["NO"], 'EE': ['EE00']}
if pr.get_metadata(c,'hotmaps_id') is None:
hotmaps_id = pr.get_metadata(c,'nuts_id')
for replacement, country in similar_countries.items():
if hotmaps_id in country:
hotmaps_id = replacement
sh_dhw = hotmaps_volumes.loc[hotmaps_volumes.country_code == hotmaps_id.lower()]
total_heat_ued = sh_dhw.query('topic == "Total useful heating demand - residential and service sector [TWh/y]"').value.iloc[0]
ds_c = EUSES.filter_countries([c]).ds
for sector, end_use in dict(zip(sectors,[end_uses,end_uses])).items():
sh_share = sh_dhw.query('feature == "Total useful heating demand, per country - '+ sector +' sector [TWh/y]"').value.iloc[0]/total_heat_ued
hw_share = sh_dhw.query('feature == "Total useful DHW demand, per country - '+ sector +' sector [TWh/y]"').value.iloc[0]/total_heat_ued
ds_c[sector+'_space_heating'].loc[:] = sh_share
ds_c[sector+'_hot_water'].loc[:] = hw_share
for sector, end_use in dict(zip(sectors,[end_uses,end_uses])).items():
for eu in end_use:
for nuts_2_id in ds_c.coords['nuts_2']:
heat_ued = zonal_stats(ds['geometry'].loc[nuts_2_id].values.item(), hd_path, stats='sum')[0].get('sum') # MWh/year
ds[sector+'_'+eu].loc[nuts_2_id] = ds_c[sector+'_'+eu].loc[nuts_2_id] * heat_ued
def space_heating():
r = requests.get('https://gitlab.com/hotmaps/load_profile/load_profile_tertiary_heating_generic/-/raw/master/data/hotmaps_task_2.7_load_profile_tertiary_heating_generic.csv')
hotmaps_profile_tert_heat = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
r = requests.get('https://gitlab.com/hotmaps/load_profile/load_profile_residential_heating_generic/-/raw/master/data/hotmaps_task_2.7_load_profile_residential_heating_generic.csv')
hotmaps_profile_resid_heat = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
space_heating_dic = {
"residential" : hotmaps_profile_resid_heat,
"service" : hotmaps_profile_tert_heat,
}
for sector,generic_profile in space_heating_dic.items():
residential_space_heating = []
service_space_heating = []
for nuts2_id in ds.coords['nuts_2'].values:
nuts0_id = ds.sel(nuts_2=nuts2_id)['country_code'].values.item()
hotmaps_id = nuts0_id
similar_countries = {'SE':["NO"], 'LU':["CH"]}
for replacement, country in similar_countries.items():
if nuts0_id in country:
hotmaps_id = replacement
similar_countries = {'HR':["AL", "MK", "ME"], 'EE00': ['EE'], 'GR':["EL"]}
for replacement, country in similar_countries.items():
if nuts0_id in country:
nuts0_id = replacement
temperature_to_load = ds['temperature'].loc[nuts0_id].to_dataframe()
temperature_to_load['hour'] = temperature_to_load.index.hour
generic_profile.hour = generic_profile.hour.replace(24,0)
gp_nuts_id = generic_profile.loc[generic_profile['NUTS2_code'].str.contains(hotmaps_id)]
for i in range(0,24):
grades = gp_nuts_id.query('hour == {}'.format(i)).load
breakpoints = gp_nuts_id.query('hour == {}'.format(i)).temperature
breakpoints = breakpoints.drop(index=breakpoints[-1:].index)
score_series = temperature_to_load.query('hour=={}'.format(i))['temperature']
def grade(score, breakpoints=breakpoints.tolist() , grades=grades.tolist()):
i = bisect.bisect(breakpoints, score)
return grades[i]
index_filter = temperature_to_load.query('hour == {}'.format(i)).index
temperature_to_load.loc[index_filter,'load'] = pd.Series(index=index_filter.tolist(),data=[grade(score) for score in score_series.tolist()])
heat_volume = ds[sector+'_space_heating'].loc[nuts2_id].values.item()
profile = (temperature_to_load.load/temperature_to_load.load.sum()*heat_volume).round(4)
eval(sector+'_space_heating').append(profile.to_list())
ds[sector+'_space_heating_profile'] = (('nuts_2','time'),(np.array(eval(sector+'_space_heating'))))
ds[sector+'_space_heating_profile'].attrs['unit'] = 'MW'
def hot_water():
# create dataframe with seasons
season_df = pd.DataFrame()
season_list = [
{
'season_name': 'winter',
'number': 1,
'start_date': '/01/01',
'end_date': '/12/31 23:00'
},
{
'season_name': 'fall & spring',
'number': 1,
'start_date': '/03/01',
'end_date': '/11/30'
},
{
'season_name': 'summer',
'number': 0,
'start_date': '/06/01',
'end_date': '/08/31'
},
]
r = requests.get('https://gitlab.com/hotmaps/load_profile/load_profile_residential_shw_generic/-/raw/master/data/hotmaps_task_2.7_load_profile_residential_shw_generic.csv')
hotmaps_profile_resid_shw = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
r = requests.get('https://gitlab.com/hotmaps/load_profile/load_profile_tertiary_shw_generic/-/raw/master/data/hotmaps_task_2.7_load_profile_tertiary_shw_generic.csv')
hotmaps_profile_ter_shw = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
hot_water_dic = {
"residential" : hotmaps_profile_resid_shw,
"service" : hotmaps_profile_ter_shw,
}
for i in season_list:
df = pd.DataFrame()
season_time_index = pd.date_range(str(year) + i.get('start_date'), str(year) + i.get('end_date'),
freq='H')
df['season'] = pd.Series([i.get('number')] * len(season_time_index), season_time_index)
if i.get('season_name') == 'winter':
season_df = | pd.concat([df, season_df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Methods to perform coverage analysis.
@author: <NAME> <<EMAIL>>
"""
import pandas as pd
import numpy as np
import geopandas as gpd
from typing import List, Optional
from shapely import geometry as geo
from datetime import datetime, timedelta
from skyfield.api import load, wgs84, EarthSatellite
from ..schemas.point import Point
from ..schemas.satellite import Satellite
from ..schemas.instrument import Instrument, DutyCycleScheme
from ..utils import (
compute_min_altitude,
swath_width_to_field_of_regard,
compute_max_access_time,
compute_orbit_period,
)
def collect_observations(
point: Point,
satellite: Satellite,
instrument: Instrument,
start: datetime,
end: datetime,
omit_solar: bool = True,
sample_distance: Optional[float] = None,
) -> gpd.GeoDataFrame:
"""
Collect single satellite observations of a geodetic point of interest.
:param point: The ground point of interest
:type point: :class:`tatc.schemas.point.Point`
:param satellite: The observing satellite
:type satellite: :class:`tatc.schemas.satellite.Satellite`
:param instrument: The instrument used to make observations
:type instrument: :class:`tatc.schemas.instrument.Instrument`
:param start: The start of the mission window
:type start: :class:`datetime.datetime`
:param end: The end of the mission window
:type end: :class:`datetime.datetime`
:param omit_solar: True, if solar angles should be omitted
to improve computational efficiency, defaults to True
:type omit_solar: bool, optional
:param sample_distance: Ground sample distance (m) to override
instrument field of regard, defaults to None
:type sample_distance: int, optional
:return: An instance of :class:`geopandas.GeoDataFrame` containing all
recorded observations
:rtype: :class:`geopandas.GeoDataFrame`
"""
# build a topocentric point at the designated geodetic point
topos = wgs84.latlon(point.latitude, point.longitude)
# load the timescale and define starting and ending points
ts = load.timescale()
t0 = ts.from_datetime(start)
t1 = ts.from_datetime(end)
# load the ephemerides
eph = load("de421.bsp")
# convert orbit to tle
orbit = satellite.orbit.to_tle()
# construct a satellite for propagation
sat = EarthSatellite(orbit.tle[0], orbit.tle[1], satellite.name)
# compute the initial satellite height (altitude)
satellite_height = wgs84.subpoint(sat.at(t0)).elevation.m
# compute the minimum altitude angle required for observation
min_altitude = compute_min_altitude(
satellite_height,
instrument.field_of_regard
if sample_distance is None
else swath_width_to_field_of_regard(satellite_height, sample_distance),
)
# compute the maximum access time to filter bad data
max_access_time = timedelta(
seconds=compute_max_access_time(satellite_height, min_altitude)
)
# TODO: consider instrument operational intervals
ops_intervals = pd.Series(
[pd.Interval(pd.Timestamp(start), pd.Timestamp(end), "both")]
)
# find the set of observation events
t, events = sat.find_events(topos, t0, t1, altitude_degrees=min_altitude)
if omit_solar:
# basic dataframe without solar angles
df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"sat_alt": pd.Series([], dtype="float64"),
"sat_az": pd.Series([], dtype="float64"),
}
)
else:
# extended dataframe including solar angles
df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"sat_alt": pd.Series([], dtype="float64"),
"sat_az": pd.Series([], dtype="float64"),
"sat_sunlit": pd.Series([], dtype="bool"),
"solar_alt": pd.Series([], dtype="float64"),
"solar_az": pd.Series([], dtype="float64"),
"solar_time": pd.Series([], dtype="float64"),
}
)
# define variables for stepping through the events list
t_rise = None
t_culminate = None
sat_sunlit = None
solar_time = None
sat_alt = None
sat_az = None
solar_alt = None
solar_az = None
# check for geocentricity
if len(events) > 0 and np.all(events == 1):
# find the satellite altitude, azimuth, and distance at t0
sat_alt, sat_az, sat_dist = (sat - topos).at(t[0]).altaz()
# if ommiting solar angles
if omit_solar:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": start,
"epoch": start + (end - start) / 2,
"end": end,
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
}, index=[0]
)
], ignore_index=True)
# otherwise if solar angles are included
else:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": start,
"epoch": start + (end - start) / 2,
"end": end,
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
"sat_sunlit": None,
"solar_alt": None,
"solar_az": None,
"solar_time": None
}, index=[0]
)
], ignore_index=True)
# compute the access time for the observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
for j in range(len(events)):
if events[j] == 0:
# record the rise time
t_rise = t[j].utc_datetime()
elif events[j] == 1:
# record the culmination time
t_culminate = t[j].utc_datetime()
# find the satellite altitude, azimuth, and distance
sat_alt, sat_az, sat_dist = (sat - topos).at(t[j]).altaz()
if not omit_solar or instrument.req_target_sunlit is not None:
# find the solar altitude, azimuth, and distance
solar_obs = (
(eph["earth"] + topos).at(t[j]).observe(eph["sun"]).apparent()
)
solar_alt, solar_az, solar_dist = solar_obs.altaz()
# find the local solar time
solar_time = solar_obs.hadec()[0].hours + 12
if not omit_solar or instrument.req_self_sunlit is not None:
# find whether the satellite is sunlit
sat_sunlit = sat.at(t[j]).is_sunlit(eph)
elif events[j] == 2:
# record the set time
t_set = t[j].utc_datetime()
# only record an observation if a previous rise and culminate
# events were recorded (sometimes they are out-of-order)
if t_rise is not None and t_culminate is not None:
# check if the observation meets minimum access duration,
# ground sunlit conditions, and satellite sunlit conditions
if (
instrument.min_access_time <= t_set - t_rise <= max_access_time * 2
and instrument.is_valid_observation(
eph,
ts.from_datetime(t_culminate),
sat.at(ts.from_datetime(t_culminate)),
)
and (
instrument.duty_cycle >= 1
or any(ops_intervals.apply(lambda i: t_culminate in i))
)
):
# if omitting solar angles
if omit_solar:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": pd.Timestamp(t_rise),
"epoch": pd.Timestamp(t_culminate),
"end": pd.Timestamp(t_set),
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
}, index=[0]
)
], ignore_index=True)
# otherwise if solar angles are included
else:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": pd.Timestamp(t_rise),
"epoch": pd.Timestamp(t_culminate),
"end": pd.Timestamp(t_set),
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
"sat_sunlit": sat_sunlit,
"solar_alt": solar_alt.degrees,
"solar_az": solar_az.degrees,
"solar_time": solar_time,
}, index=[0]
)
], ignore_index=True)
# reset the variables for stepping through the event list
t_rise = None
t_culminate = None
sat_sunlit = None
solar_time = None
sat_alt = None
sat_az = None
solar_alt = None
solar_az = None
# compute the access time for each observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
def collect_multi_observations(
point: Point,
satellites: List[Satellite],
start: datetime,
end: datetime,
omit_solar: bool = True,
sample_distance: Optional[float] = None,
) -> gpd.GeoDataFrame:
"""
Collect multiple satellite observations of a geodetic point of interest.
:param point: The ground point of interest
:type point: :class:`tatc.schemas.point.Point`
:param satellites: The observing satellites
:type satellites: list of :class:`tatc.schemas.satellite.Satellite`
:param start: The start of the mission window
:type start: :class:`datetime.datetime`
:param end: The end of the mission window
:type end: :class:`datetime.datetime`
:param omit_solar: True, if solar angles should be omitted
to improve computational efficiency, defaults to True
:type omit_solar: bool, optional
:param sample_distance: Ground sample distance (m) to override
instrument field of regard, defaults to None
:type sample_distance: int, optional
:return: an instance of :class:`geopandas.GeoDataFrame` containing all
recorded observations
:rtype: :class:`geopandas.GeoDataFrame`
"""
gdfs = [
collect_observations(
point, satellite, instrument, start, end, omit_solar, sample_distance
)
for constellation in satellites
for satellite in (constellation.generate_members())
for instrument in satellite.instruments
]
# merge the observations into one data frame
df = pd.concat(gdfs, ignore_index=True)
# sort the values by start datetime
df = df.sort_values("start")
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
def aggregate_observations(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Aggregate constellation observations for a geodetic point of interest.
:param gdf: The individual observations.
:type gdf: :class:`geopandas.GeoDataFrame`
:return: An instance of :class:`geopandas.GeoDataFrame` containing the
aggregated observations.
:rtype: :class:`geopandas.GeoDataFrame`
"""
if len(gdf.index) == 0:
empty_df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"access": pd.Series([], dtype="timedelta64[ns]"),
"revisit": pd.Series([], dtype="timedelta64[ns]")
}
)
return gpd.GeoDataFrame(empty_df, geometry=empty_df.geometry, crs="EPSG:4326")
# sort the values by start datetime
df = gdf.sort_values("start")
# assign the observation group number based on overlapping start/end times
df["obs"] = (df["start"] > df["end"].shift().cummax()).cumsum()
if all(key in gdf.columns for key in ["solar_alt", "solar_az", "solar_time"]):
# reduce solar angles
df = df.groupby("obs").agg(
{
"point_id": "first",
"geometry": "first",
"start": "min",
"epoch": "first",
"end": "max",
"solar_alt": "mean",
"solar_az": "mean",
"solar_time": "mean",
"satellite": ", ".join,
"instrument": ", ".join,
}
)
else:
# reduce only core attributes
df = df.groupby("obs").agg(
{
"point_id": "first",
"geometry": "first",
"start": "min",
"epoch": "first",
"end": "max",
"satellite": ", ".join,
"instrument": ", ".join,
}
)
# compute the access time for each observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
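# Illustrative sketch (not part of the original module): two time-overlapping
# single-satellite observations of the same point collapse into one aggregated
# observation, while a later, disjoint pass stays separate. The column layout
# mirrors what `collect_observations` returns with solar angles omitted; the
# satellite and instrument names are made up for the example.
def _example_aggregate_observations():
    t0 = pd.Timestamp("2021-01-01 00:00:00", tz="utc")
    minute = pd.Timedelta(minutes=1)
    raw = gpd.GeoDataFrame(
        {
            "point_id": [1, 1, 1],
            "geometry": [geo.Point(0, 0)] * 3,
            "satellite": ["sat-1", "sat-2", "sat-1"],
            "instrument": ["imager", "imager", "imager"],
            "start": [t0, t0 + 2 * minute, t0 + 60 * minute],
            "epoch": [t0 + 3 * minute, t0 + 5 * minute, t0 + 63 * minute],
            "end": [t0 + 6 * minute, t0 + 8 * minute, t0 + 66 * minute],
        },
        geometry="geometry",
        crs="EPSG:4326",
    )
    aggregated = aggregate_observations(raw)
    assert len(aggregated) == 2  # the first two passes overlap and are merged
    return aggregated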
def reduce_observations(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Reduce constellation observations for a geodetic point of interest.
:param gdf: The aggregated observations
:type gdf: :class:`geopandas.GeoDataFrame`
:return: An instance of :class:`geopandas.GeoDataFrame` containing the
reduced observations.
:rtype: :class:`geopandas.GeoDataFrame`
"""
if len(gdf.index) == 0:
empty_df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"access": | pd.Series([], dtype="timedelta64[ns]") | pandas.Series |
import argparse
import glob
import itertools
import os
import random
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind, kendalltau
def parse_argument() -> argparse.Namespace:
"""
Parse input arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_folder',
type=str,
default='./other_seeds',
help='Folder containing hypotheses files with different random seeds.',
)
args = parser.parse_args()
return args
def get_index(filename):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = pd.read_csv(filename, sep='\t', names=names)
df = df[df['probability'] > 0.2]
df.sort_values(by='probability', ascending=False, inplace=True, kind='mergesort')
return df.index.tolist()
def get_ranks_and_index(filename):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = pd.read_csv(filename, sep='\t', names=names)
df = df[df['probability'] > 0.20]
df.sort_values(by='probability', ascending=False, inplace=True, kind='mergesort')
df['rank'] = df['probability'].rank(method='dense', ascending=False)
return df.index.tolist(), df['rank'].tolist()
def get_ranks_using_index(filename, index):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = pd.read_csv(filename, sep='\t', names=names)
df.sort_values(by='probability', ascending=False, inplace=True, kind='mergesort')
df['rank'] = df['probability'].rank(method='dense', ascending=False)
df = df.loc[index, :]
df.sort_values(by='probability', ascending=False, inplace=True, kind='mergesort')
return df['rank'].tolist()
def get_triples(filename):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = pd.read_csv(filename, sep='\t', names=names)
df = df[df['probability'] > 0.2]
df['triple'] = df.apply(lambda row: ' '.join([row['sub'], row['pred'], row['obj']]), axis=1)
return df['triple'].tolist()
def rbo(l1, l2, p=0.98):
"""
https://github.com/ragrawal/measures/blob/master/measures/rankedlist/RBO.py
"""
"""
Calculates Ranked Biased Overlap (RBO) score.
l1 -- Ranked List 1
l2 -- Ranked List 2
"""
if l1 is None: l1 = []
if l2 is None: l2 = []
sl,ll = sorted([(len(l1), l1),(len(l2),l2)])
s, S = sl
l, L = ll
if s == 0: return 0
# Calculate the overlaps at ranks 1 through l
# (the longer of the two lists)
ss = set([]) # contains elements from the smaller list till depth i
ls = set([]) # contains elements from the longer list till depth i
x_d = {0: 0}
sum1 = 0.0
for i in range(l):
x = L[i]
y = S[i] if i < s else None
d = i + 1
# if two elements are same then
# we don't need to add to either of the set
if x == y:
x_d[d] = x_d[d-1] + 1.0
# else add items to respective list
# and calculate overlap
else:
ls.add(x)
if y != None: ss.add(y)
x_d[d] = x_d[d-1] + (1.0 if x in ss else 0.0) + (1.0 if y in ls else 0.0)
#calculate average overlap
sum1 += x_d[d]/d * pow(p, d)
sum2 = 0.0
for i in range(l-s):
d = s+i+1
sum2 += x_d[d]*(d-s)/(d*s)*pow(p,d)
sum3 = ((x_d[l]-x_d[s])/l+x_d[s]/s)*pow(p,l)
# Equation 32
rbo_ext = (1-p)/p*(sum1+sum2)+sum3
return rbo_ext
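# Illustrative sketch (not part of the original script): RBO is close to 1.0 for
# identical rankings and decreases as the rankings diverge; `p` controls how
# heavily the top of the list is weighted.
def _example_rbo():
    identical = rbo([1, 2, 3, 4], [1, 2, 3, 4], p=0.9)
    reversed_ = rbo([1, 2, 3, 4], [4, 3, 2, 1], p=0.9)
    assert identical > reversed_
    return identical, reversed_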
def main():
args = parse_argument()
files_without_original = glob.glob(os.path.join(args.input_folder, '*.txt'))
files_with_original = files_without_original + ['./hypotheses_confidence.txt']
print(f'Number of files without original found: {len(files_without_original)}')
print(f'Number of files with original found: {len(files_with_original)}')
# #######
# # rbo #
# #######
# # our hypotheses
# all_ranks = []
# for idx, f in enumerate(files_with_original):
# print(f'Processing {idx+1}/{len(files_with_original)}...')
# all_ranks.append(get_index(f))
# rbo_list = []
# for rp in list(itertools.combinations(all_ranks, 2)):
# rbo_list.append(rbo(rp[0], rp[1], p=0.99))
# print(f'RBO value: {np.mean(rbo_list)} +- {np.var(rbo_list)}')
# # random baseline
# all_ranks_baseline = []
# for r in all_ranks:
# all_ranks_baseline.append(random.sample(range(108078), len(r)))
# rbo_baseline_list = []
# for rp in list(itertools.combinations(all_ranks_baseline, 2)):
# rbo_baseline_list.append(rbo(rp[0], rp[1], p=0.99))
# print(f'Baseline RBO value: {np.mean(rbo_baseline_list)} +- {np.var(rbo_baseline_list)}')
# _, pval = ttest_ind(rbo_list, rbo_baseline_list)
# print(f'p-value: {pval}')
# ##############
# # kendalltau #
# ##############
# original_index, original_rank = get_ranks_and_index('./hypotheses_confidence.txt')
# other_ranks = [original_rank]
# for idx, f in enumerate(files_without_original):
# print(f'Processing {idx+1}/{len(files_without_original)}...')
# other_ranks.append(get_ranks_using_index(f, original_index))
# taus_list = []
# for rp in list(itertools.combinations(other_ranks, 2)):
# tau, pval = kendalltau(rp[0], rp[1])
# taus_list.append(tau)
# print(f'tau: {np.mean(taus_list)} +- {np.var(taus_list)}')
# # random baseline
# other_ranks_baseline = []
# for _ in other_ranks:
# other_ranks_baseline.append(random.sample(range(108078), len(original_rank)))
# taus_baseline_list = []
# for rp in list(itertools.combinations(other_ranks_baseline, 2)):
# tau, pval = kendalltau(rp[0], rp[1])
# taus_baseline_list.append(tau)
# print(f'Baseline tau: {np.mean(taus_baseline_list)} +- {np.var(taus_baseline_list)}')
# _, pval = ttest_ind(taus_list, taus_baseline_list)
# print(f'p-value: {pval}')
#####################
# common hypotheses #
#####################
# validated hypotheses
df_validated = pd.read_csv('../figure5/all_validated_hypothesis.txt', sep='\t')
df_validated2 = pd.read_csv('../figure5/all_validated_hypothesis_cycle_2.txt', sep='\t')
df_validated2 = df_validated2[['Subject', 'Predicate', 'Object', 'Resistance']]
df_validated = pd.concat([df_validated, df_validated2], ignore_index=True)
df_validated['triple'] = df_validated.apply(lambda row: ' '.join([row['Subject'], row['Predicate'], row['Object']]), axis=1)
df_validated = df_validated[['triple', 'Resistance']]
# common hypotheses
all_triples = []
for f in files_with_original:
all_triples.extend(get_triples(f))
all_triples_dict = {}
for x in all_triples:
if x in all_triples_dict:
all_triples_dict[x] += 1
else:
all_triples_dict[x] = 1
df_all_triples = | pd.DataFrame.from_dict(all_triples_dict, orient='index', columns=['count']) | pandas.DataFrame.from_dict |
"""Mock data for bwaw.insights tests."""
import pandas as pd
ACTIVE_BUSES = pd.DataFrame([
['213', 21.0921481, '1001', '2021-02-09 15:45:27', 52.224536, '2'],
['213', 21.0911025, '1001', '2021-02-09 15:46:22', 52.2223788, '2'],
['138', 21.0921481, '1001', '2021-02-09 15:45:27', 52.224536, '05'],
['138', 21.0911025, '1001', '2021-02-09 15:46:22', 52.2223788, '05']
], columns=['Lines', 'Lon', 'VehicleNumber', 'Time', 'Lat', 'Brigade'])
ACTIVE_BUSES['Time'] = pd.to_datetime(ACTIVE_BUSES['Time'])
COORDINATES = pd.DataFrame([
['1001', '01', 52.224536, 21.0921481, 'al.Zieleniecka', '2020-10-12 00:00:00.0']
], columns=['ID', 'Number', 'Latitude', 'Longitude', 'Destination', 'Validity'])
TIMETABLE = [{'Brigade': '2', 'Destination': 'al.Zieleniecka', 'Time': '15:46:00'}]
SPEED_INCIDENT = pd.DataFrame([
[16.378041, 52.223457, 21.091625, '2021-02-09 15:45:54.500']
], columns=['Speed', 'Lat', 'Lon', 'Time'])
SPEED_INCIDENT['Time'] = pd.to_datetime(SPEED_INCIDENT['Time'])
SPEED_INCIDENTS = | pd.DataFrame([
['213', 16.378041, 52.223457, 21.091625, '2021-02-09 15:45:54.500'],
['138', 16.378041, 52.223457, 21.091625, '2021-02-09 15:45:54.500']
], columns=['Lines', 'Speed', 'Lat', 'Lon', 'Time']) | pandas.DataFrame |
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
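# Illustrative sketch (not part of pandas): the wrappers generated above perform
# elementwise comparisons in which NaT compares unequal to everything, so __eq__
# yields False and __ne__ yields True at missing positions. Shown as a
# doctest-style comment using only the public API (importing pandas here would
# be circular for this internal module):
#
#   >>> import pandas as pd
#   >>> arr = pd.array(pd.to_datetime(["2020-01-01", None]))
#   >>> arr == pd.Timestamp("2020-01-01")
#   array([ True, False])
#   >>> arr != pd.Timestamp("2020-01-01")
#   array([False,  True])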
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
For __setitem__ we may have stricter compatibility restrictions than
for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._format_native_types(date_format=date_format, na_rep=np.nan)
return result.astype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_round_example = """>>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceil_example = """>>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
result = round_nsint64(values, mode, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
return self._simple_new(result, dtype=self.dtype)
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
return self._data.ndim
@property
def shape(self):
return self._data.shape
def reshape(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.reshape(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
def ravel(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.ravel(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
# do not cache or you'll create a memory leak
return self._data.view("i8")
@property
def _ndarray_values(self):
return self._data
# ----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None):
"""
Helper method for astype when converting to strings.
Returns
-------
ndarray[str]
"""
raise AbstractMethodError(self)
def _formatter(self, boxed=False):
# TODO: Remove Datetime & DatetimeTZ formatters.
return "'{}'".format
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
@property
def nbytes(self):
return self._data.nbytes
def __array__(self, dtype=None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._data
@property
def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = lib.is_integer(key)
if lib.is_scalar(key) and not is_int:
raise IndexError(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices"
)
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
if lib.is_scalar(val):
# i.e. self.ndim == 1
return self._box_func(val)
return type(self)(val, dtype=self.dtype)
if com.is_bool_indexer(key):
key = check_bool_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
is_period = is_period_dtype(self)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
elif key is Ellipsis:
# GH#21282 indexing with Ellipsis is similar to a full slice,
# should preserve `freq` attribute
freq = self.freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, dtype=self.dtype, freq=freq)
return result
return self._simple_new(result, dtype=self.dtype, freq=freq)
def __setitem__(
self,
key: Union[int, Sequence[int], Sequence[bool], slice],
value: Union[NaTType, Any, Sequence[Any]],
) -> None:
# I'm fudging the types a bit here. "Any" above really depends
# on type(self). For PeriodArray, it's Period (or stuff coercible
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
if lib.is_scalar(value) and not isna(value):
value = com.maybe_box_datetimelike(value)
if is_list_like(value):
is_slice = isinstance(key, slice)
if lib.is_scalar(key):
raise ValueError("setting an array element with a sequence.")
if not is_slice:
key = cast(Sequence, key)
if len(key) != len(value) and not com.is_bool_indexer(key):
msg = (
f"shape mismatch: value array of length '{len(value)}' "
"does not match indexing result of length "
f"'{len(key)}'."
)
raise ValueError(msg)
elif not len(key):
return
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value, setitem=True)
value = value.asi8
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=True)
value = self._unbox_scalar(value)
elif is_valid_nat_for_dtype(value, self.dtype):
value = iNaT
else:
msg = (
f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._data[key] = value
self._maybe_clear_freq()
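# Illustrative usage (added sketch, not part of the original source): for a
# DatetimeArray ``arr``, ``arr[0] = pd.Timestamp("2020-01-01")`` is unboxed to
# its i8 representation, ``arr[0] = pd.NaT`` stores iNaT, and assigning an
# incompatible scalar such as a Period raises the TypeError above.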
def _maybe_clear_freq(self):
# inplace operations like __setitem__ may invalidate the freq of
# DatetimeArray and TimedeltaArray
pass
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
from pandas import Categorical
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype)
and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
return Categorical(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
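# Illustrative behaviour of astype (added sketch): ``arr.astype(object)`` boxes
# each element (e.g. to Timestamp/Timedelta/Period), ``arr.astype("int64")``
# returns the underlying i8 values, and ``arr.astype("float64")`` raises
# TypeError via the branch above.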
def view(self, dtype=None):
if dtype is None or dtype is self.dtype:
return type(self)(self._data, dtype=self.dtype)
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to an i8 representation,
raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : np.int64
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = iNaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
fill_value = self._scalar_type(fill_value)
fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
)
return fill_value
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_values = take(
self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(new_values, dtype=self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
dtypes = {x.dtype for x in to_concat}
assert len(dtypes) == 1
dtype = list(dtypes)[0]
values = np.concatenate([x.asi8 for x in to_concat])
return cls(values, dtype=dtype)
def copy(self):
values = self.asi8.copy()
return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
def _values_for_factorize(self):
return self.asi8, iNaT
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
def _values_for_argsort(self):
return self._data
# ------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
raise ValueError(f"Unexpected type for 'value': {type(value)}")
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter)
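# Illustrative example (added sketch): with
# ``arr = pd.date_range("2020-01-01", periods=3).array``,
# ``arr.searchsorted("2020-01-02")`` returns 1, and passing another array of
# the same type returns one insertion index per element.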
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view("i8"), dtype=self.dtype)
def value_counts(self, dropna=False):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaT values.
Returns
-------
Series
"""
from pandas import Series, Index
if dropna:
values = self[~self.isna()]._data
else:
values = self._data
cls = type(self)
result = value_counts(values, sort=False, dropna=dropna)
index = Index(
cls(result.index.view("i8"), dtype=self.dtype), name=result.index.name
)
return Series(result.values, index=index, name=result.name)
def map(self, mapper):
# TODO(GH-23179): Add ExtensionArray.map
# Need to figure out if we want ExtensionArray.map first.
# If so, then we can refactor IndexOpsMixin._map_values to
# a standalone function and call from here..
# Else, just rewrite _map_infer_values to do the right thing.
from pandas import Index
return Index(self).map(mapper).array
# ------------------------------------------------------------------
# Null Handling
def isna(self):
return self._isnan
@property # NB: override with cache_readonly in immutable subclasses
def _isnan(self):
"""
return if each value is nan
"""
return self.asi8 == iNaT
@property # NB: override with cache_readonly in immutable subclasses
def _hasnans(self):
"""
return if I have any nans; enables various perf speedups
"""
return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
"""
Parameters
----------
result : a ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
result : ndarray with values replaced by the fill_value
Mask the result if needed, converting to the provided dtype if it is
not None.
This is an internal routine.
"""
if self._hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def fillna(self, value=None, method=None, limit=None):
# TODO(GH-20300): remove this
# Just overriding to ensure that we avoid an astype(object).
# Either 20300 or a `_values_for_fillna` would avoid this duplication.
if isinstance(value, ABCSeries):
value = value.array
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
if method == "pad":
func = missing.pad_1d
else:
func = missing.backfill_1d
values = self._data
if not is_period_dtype(self):
# For PeriodArray self._data is i8, which gets copied
# by `func`. Otherwise we need to make a copy manually
# to avoid modifying `self` in-place.
values = values.copy()
new_values = func(values, limit=limit, mask=mask)
if is_datetime64tz_dtype(self):
# we need to pass int64 values to the constructor to avoid
# re-localizing incorrectly
new_values = new_values.view("i8")
new_values = type(self)(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
# ------------------------------------------------------------------
# Frequency Properties/Methods
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._freq
@freq.setter
def freq(self, value):
if value is not None:
value = frequencies.to_offset(value)
self._validate_frequency(self, value)
self._freq = value
@property
def freqstr(self):
"""
Return the frequency object as a string if it is set, otherwise None.
"""
if self.freq is None:
return None
return self.freq.freqstr
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self):
"""
Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
if self.ndim != 1:
return None
try:
return frequencies.infer_freq(self)
except ValueError:
return None
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@property # NB: override with cache_readonly in immutable subclasses
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
@classmethod
def _validate_frequency(cls, index, freq, **kwargs):
"""
Validate that a frequency is compatible with the values of a given
Datetime Array/Index or Timedelta Array/Index
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
The index on which to determine if the given frequency is valid
freq : DateOffset
The frequency to validate
"""
if is_period_dtype(cls):
# Frequency validation is not meaningful for Period Array/Index
return None
inferred = index.inferred_freq
if index.size == 0 or inferred == freq.freqstr:
return None
try:
on_freq = cls._generate_range(
start=index[0], end=None, periods=len(index), freq=freq, **kwargs
)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError
except ValueError as e:
if "non-fixed" in str(e):
# non-fixed frequencies are not meaningful for timedelta64;
# we retain that error message
raise e
# GH#11587 the main way this is reached is if the `np.array_equal`
# check above is False. This can also be reached if index[0]
# is `NaT`, in which case the call to `cls._generate_range` will
# raise a ValueError, which we re-raise with a more targeted
# message.
raise ValueError(
f"Inferred frequency {inferred} from passed values "
f"does not conform to passed frequency {freq.freqstr}"
)
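# Illustrative example for _validate_frequency (added sketch): constructing
# ``DatetimeIndex(["2020-01-01", "2020-01-03"], freq="D")`` infers frequency
# "2D", fails the array_equal check above, and raises "Inferred frequency 2D
# from passed values does not conform to passed frequency D".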
# monotonicity/uniqueness properties are called via frequencies.infer_freq,
# see GH#23789
@property
def _is_monotonic_increasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[0]
@property
def _is_monotonic_decreasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[1]
@property
def _is_unique(self):
return len(unique1d(self.asi8)) == len(self)
# ------------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_datetimelike_array_cmp)
# pow is invalid for all three subclasses; TimedeltaArray will override
# the multiplication and division ops
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
def _add_datetimelike_scalar(self, other):
# Overridden by TimedeltaArray
raise TypeError(f"cannot add {type(self).__name__} and {type(other).__name__}")
_add_datetime_arraylike = _add_datetimelike_scalar
def _sub_datetimelike_scalar(self, other):
# Overridden by DatetimeArray
assert other is not NaT
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
_sub_datetime_arraylike = _sub_datetimelike_scalar
def _sub_period(self, other):
# Overridden by PeriodArray
raise TypeError(f"cannot subtract Period from a {type(self).__name__}")
def _add_offset(self, offset):
raise AbstractMethodError(self)
def _add_delta(self, other):
"""
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
"""
if isinstance(other, (Tick, timedelta, np.timedelta64)):
new_values = self._add_timedeltalike_scalar(other)
elif is_timedelta64_dtype(other):
# ndarray[timedelta64] or TimedeltaArray/index
new_values = self._add_delta_tdi(other)
return new_values
def _add_timedeltalike_scalar(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
if isna(other):
# i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(self.shape, dtype="i8")
new_values[:] = iNaT
return new_values
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view(
"i8"
)
new_values = self._maybe_mask_results(new_values)
return new_values.view("i8")
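# Worked example (added sketch): adding ``pd.Timedelta("1s")`` increments each
# underlying i8 value by ``delta_to_nanoseconds(pd.Timedelta("1s")) ==
# 1_000_000_000``, with NaT positions masked back to iNaT afterwards.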
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas.core.arrays import TimedeltaArray
other = TimedeltaArray._from_sequence(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view("i8")
def _add_nat(self):
"""
Add pd.NaT to self
"""
if is_period_dtype(self):
from constants import *
import os
import json
import pandas as pd
import requests
from pathlib import Path
def clean_name(df, name_column, new_column, remove_list):
# Lowercase everything
df[new_column] = df[name_column].str.lower()
remove_list = [x.lower() for x in remove_list]
# Remove undesired words
#df[new_column] = df[new_column].str.replace('|'.join(remove_list), '')
# Get only name
#df[new_column] = df[new_column].str.split().str[0]
# Title case names
df[new_column] = df[new_column].str.title()
return df
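# Example usage (illustrative sketch only, not part of the original pipeline;
# note the word-removal steps above are currently commented out):
#
#     df = pd.DataFrame({"name": ["DR. ANA MARIA", "joao SILVA"]})
#     df = clean_name(df, "name", "clean_name", ["Dr."])
#     # df["clean_name"] is now ["Dr. Ana Maria", "Joao Silva"]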
def query_gender_api(name, nationality):
url = "https://gender-api.com/v2/gender"
if len(name) < 3:
name = name+"{}".format("".join([" " for i in range(3-len(name))]))
payload = "{\"full_name\": \"%s\", \"country\": \"%s\"}" % (name, nationality)
headers = {
'Authorization': 'Bearer {}'.format(api_key),
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload.encode('utf-8'))
response = json.loads(response.content)
try:
return response['gender'], response['probability'], response['details']['samples'], response['details']['first_name_sanitized']
except:
return "Unknown", None, None, None
def query_genderize_api(name, nationality):
query = "https://api.genderize.io/?name={0}&country_id={1}".format(name, nationality)
if api_key:
query += "&apikey={}".format(api_key)
r = requests.get(query)
r.raise_for_status()
response = json.loads(str(r.text))
return response['gender'], response['probability'], response['count']
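# Example usage (illustrative sketch; assumes `api_key` is provided by
# constants.py and that the external gender services are reachable):
#
#     gender, probability, samples, sanitized = query_gender_api("Maria", "BR")
#     gender, probability, count = query_genderize_api("Maria", "BR")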
def gender_name(df, nationality_col, names_df, name_col='clean_name'):
df_temp = df
df_temp['first_name'] = df_temp[name_col].str.split().str.get(0)
df_previous = pd.merge(df_temp, names_df, left_on=['first_name', nationality_col], right_on=['sanitized_name', 'nationality'], how='left')
df_known = df_previous[~df_previous['gender'].isnull()]
df_unknown = df_previous[df_previous['gender'].isnull()]
df_unknown['gender'], df_unknown['probability'], df_unknown['count'], df_unknown['sanitized_name'] = zip(*df_unknown.apply(lambda x: query_gender_api(x[name_col], x[nationality_col]), axis=1))
df_unknown['sanitized_name'] = df_unknown['sanitized_name'].str.capitalize()
names_df = names_df.append(df_unknown[['sanitized_name', 'nationality', 'gender', 'probability', 'count']]).reset_index(drop=True)
names_df = names_df.drop_duplicates()
return df_known.append(df_unknown), names_df
def genderize_dataframe(path,
names_df,
name_col,
nationality_col):
df = pd.read_csv(path, index_col=0)
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
'''
This module is used for content-based filtering
'''
import os.path
from ast import literal_eval
import pickle
import pandas as pd
import numpy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.stem.snowball import SnowballStemmer
Run = False
if __name__ == "__main__":
Run = True
print("process FILE CP")
if not os.path.isfile('CP.txt'):
Run = True
print("process FILE CP")
if Run:
MD = pd.read_csv('movies-dataset/movies_metadata.csv')
MD['genres'] = MD['genres'].fillna('[]')
MD['genres'] = MD['genres'].apply(literal_eval)
MD['genres'] = MD['genres'].apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
VOTE_COUNTS = MD[MD['vote_count'].notnull()]['vote_count'].astype('int')
VOTE_AVERAGES = MD[MD['vote_average'].notnull()]['vote_average'].astype('int')
C = VOTE_AVERAGES.mean()
M = VOTE_COUNTS.quantile(0.95)
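# Added note (assumption, not drawn from the original file): C and M are the
# usual inputs to an IMDB-style weighted rating,
#     WR = v / (v + M) * R + M / (v + M) * C,
# where v is a movie's vote_count and R its vote_average, e.g.
#     MD['wr'] = MD.apply(lambda x: x['vote_count'] / (x['vote_count'] + M) * x['vote_average']
#                         + M / (x['vote_count'] + M) * C, axis=1)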
MD['year'] = pd.to_datetime(MD['release_date'], errors='coerce')
MD['year'] = MD['year'].apply(lambda x: str(x).split('-')[0] if x != numpy.nan else numpy.nan)
LINKS = pd.read_csv('movies-dataset/links.csv')
from __future__ import print_function
import os
import torch
import numpy as np
import shutil
import pandas as pd
def test(model, dataloader, use_cuda, criterion, full_return=False, log_path=None):
"""
Evaluates the age-prediction model on a dataloader
:param model: the network (subclass of nn.Module)
:param dataloader: a DataLoader wrapping a dataset
:param use_cuda: if True a gpu is used
:param criterion: loss function used to accumulate the total loss
:param full_return: if True also returns additional evaluation details
:param log_path: optional path used for logging
:return:
per-participant DataFrame of true and predicted ages
total loss on the dataloader
"""
model.eval()
columns = ["participant_id", "true_age", "predicted_age"]
results_df = pd.DataFrame(columns=columns)
total_loss = 0
for i, data in enumerate(dataloader, 0):
if use_cuda:
inputs, labels = data['image'].cuda(), data['label'].cuda()
data['covars'] = data['covars'].cuda()
else:
inputs, labels = data['image'], data['label']
data['covars'] = data['covars'].cpu()
age = data['age']
outputs = model(inputs, covars=data['covars'])
loss = criterion(outputs, labels.unsqueeze(1))
total_loss += loss.item()
predicted = outputs.data.squeeze(1)
# Generate detailed DataFrame
for idx, sub in enumerate(data['subject_ID']):
prediction = predicted[idx]
if 'v' in dataloader.dataset.normalization:
prediction *= dataloader.dataset.age_std
if 'm' in dataloader.dataset.normalization:
prediction += dataloader.dataset.age_mean
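# Note (added for clarity): with normalization "vm" the target was scaled as
# (age - age_mean) / age_std, so the prediction is mapped back to years here
# via prediction * age_std + age_mean.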
row = [sub, age[idx].item(), prediction.item()]
row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns)
results_df = pd.concat([results_df, row_df])
from pypowerbifix.client import PowerBIClient
from pypowerbifix.activity_logs import ActivityLogs
from datetime import datetime
import pandas as pd
from Credentials import client_id, username, password
# create your powerbi api client
client = PowerBIClient.get_client_with_username_password(client_id=client_id, username=username, password=password)
# When testing, only logs from December 15th, 2019 and later were available. This may change in the future though.
dt = datetime(2019, 12, 16)
logs = client.activity_logs.get_activity_logs(dt)
print(logs)
pandas_installed = True
try:
import pandas as pd
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
except ImportError:
    # assumed completion: mark pandas as unavailable when the import fails
    pandas_installed = False
import numpy as np
import pandas as pd
from io import StringIO
import re
import csv
from csv import reader, writer
import sys
import os
import glob
import fnmatch
from os import path
import matplotlib
from matplotlib import pyplot as plt
print("You are using Zorbit Analyzer v0.1")
directory_path = input("Please enter the path to the directory of your files. All files should be in the same location: ") #Asks users for path
os.chdir(directory_path)
x = input('Input your Interproscan output gff3 file(s):') #Asks users for gff3 input
if "*" in x: #Handles the case of *.gff3
gff3_input = glob.glob("*.gff3")
else:
y = re.sub('[|; ]', ', ', x) #Substitutes possible gff3 file delimeters with commas
gff3_input = re.split(', ', y) #Splits gff3 input into a list
for i in gff3_input:
if os.path.exists(i): #Checks existence of gff3 file
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
fasta_input = input('Input your fasta file:') #Asks users for fasta input file
if os.path.exists(fasta_input): #Checks existence of fasta input file
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
if fnmatch.fnmatch(fasta_input, '*fastq*'):
print("Zorbit Analyzer is not specifically constructed to handle fastq files but will try. If errors convert to fasta format")
ortho_input = input ('Input your ProteinOrtho output file:') #Asks users for ProteinOrtho input
if os.path.exists(ortho_input): #Checks existence of ProteinOrtho input
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
ortho_input_file_name = input ('Input your ProteinOrtho input file name (faa). Leave blank if unknown though will run slower:') #Asks users for ProteinOrtho output file
while True:
file_to_write = input('Input your desired ZorbitAnalyzer output file name: ') #Asks users for output file
if file_to_write != '': #Checks to see if user entered a file name
break
else:
print("You did not enter an output file name") #Repeatedly asks for output file name if not given
continue
Choice = ['yes', 'y', 'no', 'n']
flag = True
while flag is True:
exclusion_flag = input("Would you like to exclude sequences that do not have either Interproscan or ProteinOrtho hits? (Yes/No) ").lower()
for i in Choice:
if exclusion_flag.startswith(i):
flag = False
break
else:
continue
if exclusion_flag.startswith('y'):
exclusion_flag = 1
else:
exclusion_flag = 0
print("Analyzing files") #Lets user know input portion has completed
pdortho = pd.read_csv(ortho_input, sep="\t", engine="python") #Creates ProteinOrtho pd
test_file = 'test.txt'
test2_file = 'test2.txt'
test3_file = 'test3.txt'
#Testing open/closing files
def try_file(input_file): #Defining function that creates/opens user output file and truncates it before closing it
try:
open(input_file, 'w+').close()
except IOError:
print("Unable to open output file")
try_file('file_to_write.txt') #Creates/opens output file and truncates it before closing it
try_file('test.txt') #Creates/opens test file and truncates it before closing it
try_file('gff3_file_to_write.txt') #Creates/opens gff3 output file and truncates it before closing it
try_file('gff3_statsfile_to_write.txt') #Creates/opens gff3 stats output file and truncates it before closing it
try_file('fasta_file_to_write.txt') #Creates/opens fasta output file and truncates it before closing it
try_file('ortho_file_to_write.txt') #Creates/opens ProteinOrtho output file and truncates it before closing it
try_file('ortho_file_to_write2.txt') #Creates/opens a second ProteinOrtho output file and truncates it before closing it
try_file('zorbit_statistics.txt') #Creates/opens a statistics file and truncates it before closing it
#Defining variables for later use
fasta_file_to_write = 'fasta_file_to_write.txt' #Defining the interim fasta file to write
gff3_file_to_write = 'gff3_file_to_write.txt' #Defining the interim gff3 file to write
gff3_statsfile_to_write = 'gff3_statsfile_to_write.txt'
ortho_file_to_write = 'ortho_file_to_write.txt' #Defining the interim Protein Ortho file to write
zorbit_statistics = 'zorbit_statistics.txt' #Defining the Zorbit Statistics variable
string_to_remove1 = '##' #Removes header and gene introduction lines
string_to_remove2 = 'polypeptide' #Removes redundant polypeptide line
string_to_remove3 = 'MobiDBLite' #Removes results from MobiDBLite database
string_to_end = '##FASTA' #Sets end of file as the start of the fasta/code part of gff3 files
#fasta
fasta_file = None
fastq_file = None
fasta_type = "amino_acid"
fastq_start_character = '@'
fasta_start_character = '>' #Setting start character for fasta information line
fastq_third_line_character ='+'
fna_type = "fna"
if fna_type in fasta_input:
fasta_type = "nucleotide"
with open(fasta_input, 'r') as fasta: #Opening fasta input file to read
for line in fasta: #reading lines in fasta file
if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines
fasta_file = fasta_input
break
elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)
fastq_file = fasta_input
fasta_type = "nucleotide"
break
else:
print("The fasta input file does not seem to have typical fasta or fastq format")
sys.exit()
if fasta_file is not None: #Checking to see if fasta input was fasta file (should not be empty)
print("Working on fasta file")
with open(fasta_input, 'r') as fasta: #Opening fasta input file to read
with open(fasta_file_to_write, 'a') as f: #Opens the output file to append
for line in fasta: #reading lines in fasta file
if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines
fasta_nostart = re.sub('>', '\n', line) #Removing > symbol and replacing with carriage return from each occurrence
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
else:
if not line.isspace(): #Will not write blank lines
sequence_no_carriage = re.sub('\n', '', line) #Removes carriage return from before the sequence data
sequence_no_line_break = re.sub('\r', '', sequence_no_carriage) #Removes line break from before the sequence data
f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns
else:
continue
elif fastq_file is not None: #Checking to see if fasta input was fastq file (should not be empty)
print("Working on fastq file")
with open(fasta_input, 'r', encoding="latin-1") as fasta: #Opening fasta input file to read
with open(fasta_file_to_write, 'a', encoding="latin-1") as f: #Opens the output file to append
for i, line in enumerate(fasta): #reading lines in fasta file
if i == 0: # Dealing with first line differently (no line break)
fasta_nostart = re.sub('@', '', line) #Removing @ symbol from each occurrence and replaces with nothing
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)
fasta_nostart = re.sub('@', '\n', line) #Removing @ symbol from each occurrence and replaces with carriage return
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
elif i % 4 == 1: #Writing line 2/4 (sequence file) to output file
sequence_no_carriage = re.sub('\n', '', line) #Removes carriage return from before the sequence data
sequence_no_line_break = re.sub('\r', '', sequence_no_carriage) #Removes line break from before the sequence data
f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns
else:
pass
else:
print("The input file does not seem to be in typical fasta or fastq format. Please check and try again") #Ending if atypical fasta/fastq format
sys.exit()
for i in gff3_input: #Cleaning up gff3 file prior to conversion to dataframe
with open(i, 'r') as stack:
with open(gff3_file_to_write, 'a') as f:
for line in stack:
if string_to_end in line: #Closes file at the start of the sequence data without including
f.close()
break
elif string_to_remove1 in line: #Removing header and gene introduction lines (if present)
continue
elif string_to_remove2 in line: #Removing polypeptide line (if present)
continue
elif string_to_remove3 in line: #Removing MobiDBLite database (if present)
continue
else:
f.write(line)
for i in gff3_input: #Saving unedited gff3 input into file for statistics purposes later
with open(i, 'r') as stack:
with open(gff3_statsfile_to_write, 'a') as f:
for line in stack:
if string_to_end in line: #Closes file at the start of the sequence data without including
f.close()
break
elif string_to_remove1 in line: #Removing header and gene introduction lines (if present)
continue
else:
f.write(line)
fasta_column_names = ['SeqID', 'Information', 'Sequence'] #Defining the list of fasta column names to pass to the dataframe
fastapd = pd.read_csv(fasta_file_to_write, names=fasta_column_names, engine = "python", header=None) #Creating a Pandas dataframe from the fasta output csv
SeqID_list = fastapd["SeqID"].tolist() #Saving contents of the SeqID column to a list
fasta_row_number = len(fastapd) #Counting the number of rows in the fasta dataframe for the statistics output
with open(zorbit_statistics, 'a') as f:
f.write("The number of sequences in the fasta is " + str(fasta_row_number) + "\n")
#Start orthopd
print("Working on ProteinOrtho dataframe")
orthopd = pd.read_csv(ortho_input, sep='\t', engine="python", na_values="*") #Creates a Pandas dataframe from ProteinOrtho input csv
ortho_column_names = list(orthopd.columns)
#Defining the SeqID column
if ortho_input_file_name != "":
orthopd.columns = ["SeqID" if col.startswith(ortho_input_file_name) else col for col in orthopd.columns] #Renaming the fasta input column in ProteinOrtho dataframe to SeqID to match other dataframes
else: pass
#Attempting to identify which column corresponds to the input fasta
fasta_input_split = fasta_input.split('.', 1)[0] #Trying to delete file handle from the fasta input file in case there was .fasta versus .faa, etc
orthopd_pruned = orthopd.drop(columns=['# Species', 'Genes', 'Alg.-Conn.']) #Creating a new dataframe without the first three columns which will always have data in each row in order to id longest column
if orthopd.columns.astype(str).str.contains("SeqID").any(): #Checking to see if fasta input file name is in the ProteinOrtho column name list
print("Found fasta Sequence ID column in ProteinOrtho file")
else:
print("Trying to find fasta file in ProteinOrtho file through other means")
orthopd.columns = ["SeqID" if col.startswith(fasta_input_split) else col for col in orthopd.columns] #Using the input fasta file name as a guess for the faa file name
if orthopd.columns.astype(str).str.contains("SeqID").any(): #Breaks loops if the column name has been found/replaced
print("Found fasta Sequence ID column in ProteinOrtho file")
else:
print("Attempting another way of identifying fasta file column. This may take some time")
orthopd_fasta_column_name = orthopd_pruned.count().idxmax() #Finding column with the least number of NaN which is likely the input fasta
for l in SeqID_list: #Searching to see if any values from the fastapd SeqID column (l) are in the putative SeqID ProteinOrtho column
if orthopd[orthopd_fasta_column_name].astype(str).str.contains(l).any():
orthopd.rename(columns=lambda x: x.replace(orthopd_fasta_column_name, "SeqID"), inplace=True) #Renaming the ProteinOrtho column with fasta sequence names as SeqID
break
else:
print("Final method to identify fasta file column. This may take hours")
orthopd = orthopd.drop(orthopd[(orthopd['Genes'] == 1)].index) #Gets rid of rows with just a single gene found in order to speed up full frame search
for l in SeqID_list: #Searching to see if any values from the fastapd SeqID column (l) are in the ProteinOrtho dataframe
for i in orthopd.columns:
if orthopd[i].astype(str).str.contains(l).any():
orthopd.rename(columns=lambda x: x.replace(i, "SeqID"), inplace=True) #Renaming the ProteinOrtho column with fasta sequence names as SeqID
break
orthopd = orthopd.drop(orthopd[(orthopd['SeqID'].isna())].index)#Removing SeqID rows with NaN
#Splitting the duplicated entries in the SeqID column and making new rows with a SeqID member on each but with same data otherwise
def pir2(df, c): #Defining function to split the SeqID column at each comma and place one of each split value onto a new, otherwise duplicated row
colc = df[c].astype(str).str.split(',')
clst = colc.values.astype(object).tolist()
lens = [len(l) for l in clst]
j = df.columns.get_loc(c)
v = df.values
n, m = v.shape
r = np.arange(n).repeat(lens)
return pd.DataFrame(
np.column_stack([v[r, 0:j], np.concatenate(clst), v[r, j+1:]]),
columns=orthopd.columns
)
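# Worked example (illustrative sketch): a row whose SeqID is "seq1,seq2" is
# expanded by pir2 into two rows, one with SeqID "seq1" and one with "seq2",
# with the values of every other column duplicated onto both rows.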
orthopd3 = pir2(orthopd, "SeqID") #Running column split function on the SeqID column on orthopd
print("Beginning data analysis on the ProteinOrtho dataframe")
#Graph Algebraic Connectivity
orthopd_algconn_nozero = orthopd3[orthopd3['Alg.-Conn.'] != 0] #Removing zero and one counts in orthopd for graph
orthopd_algconn_noone = orthopd_algconn_nozero[orthopd_algconn_nozero['Alg.-Conn.'] != 1] #Removing rows where Alg.-Conn. equals 1 from the ProteinOrtho dataframe
orthopd_algconn_noone['Alg.-Conn.'].plot.hist(grid=True, bins=100,
color='#607c8e')
plt.title('Distribution of Algebraic Connectivity without Unity')
plt.xlabel('Degree of Connectivity')
plt.ylabel('Number of Genes with Degree of Connectivity')
plt.tight_layout()
plt.savefig("ProteinOrtho_AlgConn_graph_noone.png")#Saving graph to file
plt.clf()
orthopd_algconn_nozero['Alg.-Conn.'].plot.hist(grid=True, bins=100,
color='#607c8e')
plt.title('Distribution of Algebraic Connectivity')
plt.xlabel('Degree of Connectivity')
plt.ylabel('Number of Genes with Degree of Connectivity')
plt.tight_layout()
plt.savefig("ProteinOrtho_AlgConn_graph.png")#Saving graph to file
plt.clf()
#Graph Gene Counts
orthopd_gene_count_values = orthopd3['Genes'].value_counts() #Getting the count of each gene-count value in the ProteinOrtho dataframe
orthopd_gene_count_values.plot(kind='bar') #Graphing the database counts
plt.title('Graph of Gene Counts')
plt.xlabel('Number of Shared transcripts')
plt.ylabel('Number of Genes with same frequency')
plt.tight_layout()
plt.savefig("ProteinOrtho_gene_graph.png")#Saving graph to file
plt.clf()
#Start gff3pd
print("Working on gff3 dataframe")
gff3pd_column_names = ['SeqID', 'Database', 'Match type', 'Start', 'Stop', 'Score', 'Strand', 'Phase', 'Match information'] #Renaming static gff3 columns
statsgff3pd = pd.read_csv(gff3_statsfile_to_write, sep='\t', names=gff3pd_column_names, header=None, engine="python") #Creating a dataframe for gff3 stats
gff3pd_original_row_number = len(statsgff3pd) #Counting the number of rows in the original gff3pd dataframe for the statistics output
with open(zorbit_statistics, 'a') as f: #Writing the number of rows in the original gff3pd dataframe to the statistics output
f.write("The number of sequences in the original gff3 file is " + str(gff3pd_original_row_number) + "\n")
gff3pd = pd.read_csv(gff3_file_to_write, sep='\t', names=gff3pd_column_names, header=None, engine = "python") #Creating a Pandas dataframe from the gff3 output csv
gff3pd_row_number = len(gff3pd) #Counting the number of rows in the final gff3 file dataframe for the statistics output
gff3pd_max_score = gff3pd['Score'].max() #Finding maximum value in Score column of gff3 dataframe
gff3pd_without_null = gff3pd[gff3pd['Score'] != "."] #Finding minimum value in Score column of gff3 dataframe
gff3pd_without_null_or_zero = gff3pd_without_null[gff3pd_without_null['Score'] != 0.0]
gff3pd_min_score = gff3pd_without_null_or_zero['Score'].min()
statsgff3pd_without_null = statsgff3pd[statsgff3pd['Score'] != "."]
statsgff3pd_max_score = statsgff3pd_without_null['Score'].max()
with open(zorbit_statistics, 'a') as f:
f.write("The number of sequences in the gff3 file after removal of MobiDBLite and duplicates is " + str(gff3pd_row_number) + "\n") #Adding cleaned gff3 stastitics to file
f.write("The range of quality scores for the gff3 file range from " + str(gff3pd_min_score) + " to " + str(gff3pd_max_score) + "\n")#Adding range of scores to statistics file
f.write("The maximum quality score for the original gff3 file is " + str(statsgff3pd_max_score) + "\n")
#Graph database distribution
gff3pd_database_count_values = gff3pd['Database'].value_counts() #Getting the count of each database in the gff3 dataframe
gff3pd_database_count_values.plot(kind='bar') #Graphing the database counts
plt.title('Distribution of Database hits')
plt.xlabel('Database name')
plt.ylabel('Number of Database hits')
plt.tight_layout()
plt.savefig("Gff3_database_graph.png")#Saving graph to file
plt.clf()
#Preparing dataframes for merging
print("Preparing dataframes for merge")
gff3pd['SeqID'] = gff3pd['SeqID'].astype(str) #Setting column type as string
orthopd3['SeqID'] = orthopd3['SeqID'].astype(str) #Setting column type as string
fastapd['SeqID'] = fastapd['SeqID'].astype(str) #Setting column type as string
#Dealing with fna versus faa
protein_flag = 0
if fasta_type == "nucleotide": #Checking to see if the fasta_type is nucleotide
gff3pd_split = gff3pd['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
gff3pd['SeqID'] = gff3pd_split[0] #Setting the gff3 SeqID column as the split column
orthopd_split = orthopd3['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
orthopd['SeqID'] = orthopd_split[0] #Setting the ProteinOrtho SeqID column as the split column
else:
#Pulling out reading frame information
protein_flag = 1
gff3pd['SeqID2'] = gff3pd['SeqID']
gff3pd_split = gff3pd['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra number after the fasta SeqID
gff3pd['SeqID2'] = gff3pd_split[0] #Setting the gff3 SeqID column as the split column
gff3pd_split = gff3pd['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out
gff3pd['SeqID2'] = gff3pd_split[0] #Setting the gff3 SeqID column
gff3pd['Reading_Frame'] = gff3pd_split[1] #Setting the gff3 Frame column
gff3pd = gff3pd.drop(['SeqID2'], axis=1)
orthopd3['SeqID2'] = orthopd3['SeqID']
orthopd_split = orthopd3['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
orthopd3['SeqID2'] = orthopd_split[0] #Setting the ProteinOrtho SeqID column as the split column
orthopd_split = orthopd3['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out
orthopd3['SeqID2'] = orthopd_split[0] #Setting the orthopd SeqID column
orthopd3['Reading_Frame'] = orthopd_split[1] #Setting the gff3 Frame column
orthopd = orthopd3.drop(['SeqID2'], axis=1)
#Merging
print("Combining dataframes")
gff3_ortho_merge = pd.merge(orthopd, gff3pd, how='outer', on=['SeqID']) #Merging the ProteinOrtho and interproscan dataframes
all_merge = pd.merge(gff3_ortho_merge, fastapd, how='outer', on=['SeqID']) #Merging the fasta dataframe with the combined ProteinOrtho/Interproscan dataframes
#Adding marks to merged dataframe to make fasta
all_merge['SeqID'] = all_merge['SeqID'].apply(lambda x: f'>{x}') #Placing > at the beginning of each new line and a tab at the end of SeqID
all_merge['Sequence'] = all_merge['Sequence'].apply(lambda x: f'\n{x}') #Placing a new line before the Sequence data
all_merge = all_merge[ ['SeqID'] + [ col for col in all_merge.columns if col != 'SeqID' ] ] #Moving SeqID to the far left of the dataframe
all_merge = all_merge[ [ col for col in all_merge.columns if col != 'Sequence' ] + ['Sequence'] ] #Moving Sequence to the far right of the dataframe
#Statistics on the merged dataframe
all_merge_both = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] == 1))].index)
all_merge_neither = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] !=1))].index)
all_merge_just_ortho = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] == 1))].index)
all_merge_just_inter = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] !=1))].index)
all_merge_all = len(pd.unique(all_merge['SeqID'])) #Calculating the number of unique sequences
all_merge_both = len(pd.unique(all_merge_both['SeqID'])) #Calculating unique sequences with both interproscan and proteinortho hits
all_merge_neither = len(pd.unique(all_merge_neither['SeqID'])) #Calculating unique sequences without interproscan or proteinortho hits
all_merge_just_ortho = len( | pd.unique(all_merge_just_ortho['SeqID']) | pandas.unique |
"""test_split_utils.py: tests for split_utils.py"""
from os import path
from math import floor
from datetime import datetime, timedelta
from tinydb import TinyDB, Query
import pandas as pd
import pytest
import publicAPI.split_utils as split_utils
import publicAPI.config as api_utils
import publicAPI.crest_utils as crest_utils
import publicAPI.forecast_utils as forecast_utils
import publicAPI.exceptions as exceptions
import helpers
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)
SPLIT_FILE = path.join(ROOT, 'publicAPI', 'split_info.json')
SPLIT_CACHE = path.join(ROOT, 'publicAPI', 'cache', 'travis_splitcache.json')
DAYS_SINCE_SPLIT = 10
TEST_DATE = datetime.utcnow() - timedelta(days=DAYS_SINCE_SPLIT)
FUTURE_DATE = datetime.utcnow() + timedelta(days=DAYS_SINCE_SPLIT)
DEMO_SPLIT = {
'type_id':35,
'type_name':'Tritanium',
'original_id':34,
'new_id':35,
'split_date':TEST_DATE.strftime('%Y-%m-%d'),
'bool_mult_div':'False',
'split_rate': 10
}
DEMO_UNSPLIT = {
'type_id':34,
'type_name':'Pyerite',
'original_id':34,
'new_id':35,
'split_date':FUTURE_DATE.strftime('%Y-%m-%d'),
'bool_mult_div':'True',
'split_rate': 10
}
DEMO_NOSPLIT = {
'type_id':35,
'type_name':'Tritanium',
'original_id':35,
'new_id':35,
'split_date':TEST_DATE.strftime('%Y-%m-%d'),
'bool_mult_div':'False',
'split_rate': 10
}
ROOT_CONFIG = helpers.get_config(
path.join(ROOT, 'scripts', 'app.cfg')
)
TEST_CONFIG = helpers.get_config(
path.join(HERE, 'test_config.cfg')
)
def test_splitinfo_happypath():
"""test SplitInfo behavior"""
split_obj = split_utils.SplitInfo(DEMO_SPLIT)
## Validate data inside obj ##
assert split_obj.type_id == DEMO_SPLIT['type_id']
assert split_obj.type_name == DEMO_SPLIT['type_name']
assert split_obj.original_id == DEMO_SPLIT['original_id']
assert split_obj.new_id == DEMO_SPLIT['new_id']
assert split_obj.split_date == datetime.strptime(DEMO_SPLIT['split_date'], '%Y-%m-%d')
assert split_obj.date_str == DEMO_SPLIT['split_date']
assert split_obj.bool_mult_div == False
assert split_obj.split_rate == DEMO_SPLIT['split_rate']
assert split_obj.current_typeid() == DEMO_SPLIT['new_id']
## Validate magicmethod behavior ##
assert int(split_obj) == DEMO_SPLIT['new_id']
assert bool(split_obj) #should be True
assert str(split_obj) == DEMO_SPLIT['type_name']
test_price = 3.5
test_volume = 1e6
expected_price = test_price / DEMO_SPLIT['split_rate']
expected_volume = test_volume * DEMO_SPLIT['split_rate']
assert test_price * split_obj == expected_price
assert split_obj * test_price == expected_price
assert test_volume / split_obj == expected_volume
def test_splitinfo_reverse():
"""validate SplitInfo with "True" bool_mult_div"""
split_obj = split_utils.SplitInfo(DEMO_UNSPLIT)
## Validate data inside obj ##
assert split_obj.bool_mult_div == True
assert split_obj.current_typeid() == DEMO_UNSPLIT['original_id']
test_price = 3.5
test_volume = 1e6
expected_price = test_price * DEMO_SPLIT['split_rate']
expected_volume = test_volume / DEMO_SPLIT['split_rate']
assert test_price * split_obj == expected_price
assert split_obj * test_price == expected_price
assert test_volume / split_obj == expected_volume
def test_splitinfo_throws():
"""make sure bad behavior is caught"""
short_profile = dict(DEMO_SPLIT)
short_profile.pop('split_rate', None)
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(short_profile)
bad_split = dict(DEMO_SPLIT)
bad_split['split_rate'] = 'bacon'
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(bad_split)
bad_date = dict(DEMO_SPLIT)
bad_date['split_date'] = 'Tomorrow'
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(bad_date)
bad_bool = dict(DEMO_SPLIT)
bad_bool['bool_mult_div'] = 'bacon'
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(bad_bool)
def test_load_data():
"""push data into global scope for testing"""
api_utils.SPLIT_INFO = split_utils.read_split_info()
demosplit_obj = split_utils.SplitInfo(DEMO_SPLIT)
revrsplit_obj = split_utils.SplitInfo(DEMO_UNSPLIT)
api_utils.SPLIT_INFO[demosplit_obj.type_id] = demosplit_obj
api_utils.SPLIT_INFO[revrsplit_obj.type_id] = revrsplit_obj
def test_datetime_helper():
"""validate datetime helper"""
short_string = '2017-04-01'
long_string = '2017-04-01T12:14:10'
bad_string = '2017-04-01T12:14:10-07:00'
short_datetime = split_utils.datetime_helper(short_string)
long_datetime = split_utils.datetime_helper(long_string)
with pytest.raises(ValueError):
bad_datetime = split_utils.datetime_helper(bad_string)
def test_split_history_throws():
"""make sure fetch_split_history throws expected errors"""
with pytest.raises(exceptions.NoSplitConfigFound):
split_obj = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
int(TEST_CONFIG.get('TEST', 'alt_id')) + 1,
api_utils.SwitchCCPSource.EMD
)
SPLIT_CACHE_FILE = path.join(
ROOT, 'publicAPI', 'cache', TEST_CONFIG.get('TEST', 'splitcache_file')
)
def test_fetch_cache_data():
"""fetch data from cache and make sure shape is correct"""
cache_data = split_utils.fetch_split_cache_data(
TEST_CONFIG.get('TEST', 'region_id'),
TEST_CONFIG.get('TEST', 'type_id'),
#split_cache_file=SPLIT_CACHE_FILE
)
missing_keys = set(cache_data.columns.values) - set(split_utils.KEEP_COLUMNS)
assert missing_keys == set()
def test_fetch_cache_fail():
"""make sure bad-path is covered"""
with pytest.raises(exceptions.NoSplitDataFound):
cache_data = split_utils.fetch_split_cache_data(
TEST_CONFIG.get('TEST', 'region_id'),
int(TEST_CONFIG.get('TEST', 'bad_typeid')),
#split_cache_file=SPLIT_CACHE_FILE
)
def test_execute_split_forward():
"""check if execute_split works as expected"""
split_obj = split_utils.SplitInfo(DEMO_SPLIT)
cache_data = split_utils.fetch_split_cache_data(
TEST_CONFIG.get('TEST', 'region_id'),
TEST_CONFIG.get('TEST', 'type_id'),
#split_cache_file=SPLIT_CACHE_FILE
)
split_data = split_utils.execute_split(
cache_data.copy(), #copy b/c otherwise passed by reference
split_obj
)
price_mod = split_obj.split_rate
if not split_obj.bool_mult_div:
price_mod = 1/price_mod
for col_name in split_utils.PRICE_KEYS:
price_diff = abs(split_data[col_name] - (cache_data[col_name] * price_mod))
assert price_diff.max() < float(TEST_CONFIG.get('TEST', 'float_limit'))
#float() is weird, look for difference to be trivially small
vol_mod = 1/price_mod
for col_name in split_utils.VOLUME_KEYS:
vol_diff = abs(split_data[col_name] - (cache_data[col_name] * vol_mod))
assert vol_diff.max() < float(TEST_CONFIG.get('TEST', 'float_limit'))
def test_execute_split_backwards():
"""check if execute_split works as expected"""
split_obj = split_utils.SplitInfo(DEMO_UNSPLIT)
cache_data = split_utils.fetch_split_cache_data(
TEST_CONFIG.get('TEST', 'region_id'),
TEST_CONFIG.get('TEST', 'type_id'),
#split_cache_file=SPLIT_CACHE_FILE
)
split_data = split_utils.execute_split(
cache_data.copy(), #copy b/c otherwise passed by reference
split_obj
)
price_mod = split_obj.split_rate
if not split_obj.bool_mult_div:
price_mod = 1/price_mod
for col_name in split_utils.PRICE_KEYS:
price_diff = abs(split_data[col_name] - (cache_data[col_name] * price_mod))
assert price_diff.max() < float(TEST_CONFIG.get('TEST', 'float_limit'))
vol_mod = 1/price_mod
for col_name in split_utils.VOLUME_KEYS:
vol_diff = abs(split_data[col_name] - (cache_data[col_name] * vol_mod))
assert vol_diff.max() < float(TEST_CONFIG.get('TEST', 'float_limit'))
@pytest.mark.incremental
class TestNoSplit:
"""validate behavior if there's no split to perform"""
test_type_id = DEMO_UNSPLIT['type_id']
def test_future_split_esi(self):
"""validate on ESI"""
test_data_esi = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
api_utils.SwitchCCPSource.ESI,
config=ROOT_CONFIG
)
assert test_data_esi.equals(
crest_utils.fetch_market_history(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
config=ROOT_CONFIG
)
)
def test_future_split_emd(self):
"""valdiate with EMD source"""
test_data_emd = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
fetch_source=api_utils.SwitchCCPSource.EMD,
data_range=TEST_CONFIG.get('TEST', 'history_count'),
config=ROOT_CONFIG
)
emd_data_raw = forecast_utils.fetch_market_history_emd(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
data_range=TEST_CONFIG.get('TEST', 'history_count'),
config=ROOT_CONFIG
)
assert test_data_emd.equals(forecast_utils.parse_emd_data(emd_data_raw['result']))
def test_short_split(self):
"""make sure escaped if split was too far back"""
short_days = floor(DAYS_SINCE_SPLIT/2)
test_data_emd = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
DEMO_SPLIT['type_id'],
data_range=short_days,
config=ROOT_CONFIG
)
emd_data_raw = forecast_utils.fetch_market_history_emd(
TEST_CONFIG.get('TEST', 'region_id'),
DEMO_SPLIT['type_id'],
data_range=short_days,
config=ROOT_CONFIG
)
assert test_data_emd.equals(
forecast_utils.parse_emd_data(emd_data_raw['result']))
def days_since_date(date_str):
"""return number of days since date requested
Args:
date_str (str)
    Returns:
int: number of days since date
"""
demo_date = split_utils.datetime_helper(date_str)
delta = datetime.utcnow() - demo_date
return delta.days
def prep_raw_data(
data,
min_date
):
"""clean up data for testing
Args:
data (:obj:`pandas.DataFrame`): dataframe to clean (COPY)
min_date (str): datetime to filter to
Returns:
pandas.DataFrame: clean_data
"""
clean_data = data[data.date >= min_date]
clean_data = clean_data[split_utils.KEEP_COLUMNS]
clean_data.sort_values(
by='date',
ascending=False,
inplace=True
)
return clean_data
def validate_plain_data(
raw_data,
split_data,
float_limit=float(TEST_CONFIG.get('TEST', 'float_limit'))
):
"""validate data that did not split
Args:
raw_data (:obj:`pandas.DataFrame`): raw data (A group)
split_data (:obj:`pandas.DataFrame`): split data (B group)
float_limit (float): maximum deviation for equality test
Returns:
(None): asserts internally
"""
for column in split_data.columns.values:
print(split_data[column])
print(raw_data[column])
if column == 'date':
assert split_data[column].equals(raw_data[column])
elif column == 'index':
continue
else:
            diff = abs(pd.to_numeric(split_data[column]) - pd.to_numeric(raw_data[column]))
            assert diff.max() < float_limit
import numpy as np
import pandas as pd
from analysis.transform_fast import load_raw_cohort, transform
def test_immuno_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMRX_DAT <> NULL | Select | Next
if pd.notnull(row["immrx_dat"]):
assert row["immuno_group"]
continue
# IF IMMDX_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["immdx_cov_dat"]):
assert row["immuno_group"]
else:
assert not row["immuno_group"]
def test_ckd_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CKD_COV_DAT <> NULL (diagnoses) | Select | Next
if pd.notnull(row["ckd_cov_dat"]):
assert row["ckd_group"]
continue
# IF CKD15_DAT = NULL (No stages) | Reject | Next
if pd.isnull(row["ckd15_dat"]):
assert not row["ckd_group"]
continue
# IF CKD35_DAT>=CKD15_DAT | Select | Reject
if gte(row["ckd35_dat"], row["ckd15_dat"]):
assert row["ckd_group"]
else:
assert not row["ckd_group"]
def test_ast_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF ASTADM_DAT <> NULL | Select | Next
if pd.notnull(row["astadm_dat"]):
assert row["ast_group"]
continue
# IF AST_DAT <> NULL | Next | Reject
if pd.isnull(row["ast_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM1 <> NULL | Next | Reject
        if pd.isnull(row["astrxm1_dat"]):
            assert not row["ast_group"]
            continue
# coding: utf-8
import pandas as pd
import numpy as np
import cv2 # Used to manipulated the images
seed = 1207
np.random.seed(seed)
# Import Keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from scipy.ndimage import gaussian_filter
from skimage import img_as_float
from skimage.morphology import reconstruction
from sklearn.model_selection import StratifiedKFold
import model_simple_nodrop as model_source
# ## Load Training Data
df_train = pd.read_json('./input/train.json') # this is a dataframe
def get_scaled_imgs(df):
imgs = []
for i, row in df.iterrows():
#make 75x75 image
band_1 = np.array(row['band_1']).reshape(75, 75)
band_2 = np.array(row['band_2']).reshape(75, 75)
band_3 = band_1 + band_2 # plus since log(x*y) = log(x) + log(y)
# Rescale
a = (band_1 - band_1.mean()) / (band_1.max() - band_1.min())
b = (band_2 - band_2.mean()) / (band_2.max() - band_2.min())
c = (band_3 - band_3.mean()) / (band_3.max() - band_3.min())
imgs.append(np.dstack((a, b, c)))
return np.array(imgs)
def get_more_images(imgs):
more_images = []
vert_flip_imgs = []
hori_flip_imgs = []
for i in range(0,imgs.shape[0]):
a=imgs[i,:,:,0]
b=imgs[i,:,:,1]
c=imgs[i,:,:,2]
av=cv2.flip(a,1)
ah=cv2.flip(a,0)
bv=cv2.flip(b,1)
bh=cv2.flip(b,0)
cv=cv2.flip(c,1)
ch=cv2.flip(c,0)
vert_flip_imgs.append(np.dstack((av, bv, cv)))
hori_flip_imgs.append(np.dstack((ah, bh, ch)))
v = np.array(vert_flip_imgs)
h = np.array(hori_flip_imgs)
more_images = np.concatenate((imgs,v,h))
return more_images
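# Illustrative note: get_more_images() triples the training set by stacking flipped
# copies of every image. For reference, cv2.flip(img, 1) mirrors around the vertical
# axis (left-right) and cv2.flip(img, 0) around the horizontal axis (up-down), e.g.
# cv2.flip(np.array([[1, 2], [3, 4]]), 1) -> [[2, 1], [4, 3]].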
Xtrain = get_scaled_imgs(df_train)
Ytrain = np.array(df_train['is_iceberg'])
df_train.inc_angle = df_train.inc_angle.replace('na',0)
idx_tr = np.where(df_train.inc_angle>0)
Ytrain = Ytrain[idx_tr[0]]
Xtrain = Xtrain[idx_tr[0],...]
Xtr_more = get_more_images(Xtrain)
Ytr_more = np.concatenate((Ytrain,Ytrain,Ytrain))
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
for fold_n, (train, test) in enumerate(kfold.split(Xtr_more, Ytr_more)):
print("FOLD nr: ", fold_n)
model = model_source.get_model()
#model.summary()
MODEL_FILE = 'mdl_simple_k{}_wght.hdf5'.format(fold_n)
batch_size = 32
mcp_save = ModelCheckpoint(MODEL_FILE, save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=15, verbose=1, epsilon=1e-4, mode='min')
model.fit(Xtr_more[train], Ytr_more[train],
batch_size=batch_size,
epochs=32,
verbose=1,
validation_data=(Xtr_more[test], Ytr_more[test]),
callbacks=[mcp_save, reduce_lr_loss])
model.load_weights(filepath = MODEL_FILE)
score = model.evaluate(Xtr_more[test], Ytr_more[test], verbose=1)
print('\n Val score:', score[0])
print('\n Val accuracy:', score[1])
SUBMISSION = './result/simplenet/sub_simple_v1_{}.csv'.format(fold_n)
df_test = pd.read_json('./input/test.json')
df_test.inc_angle = df_test.inc_angle.replace('na',0)
Xtest = (get_scaled_imgs(df_test))
pred_test = model.predict(Xtest)
submission = pd.DataFrame({'id': df_test["id"], 'is_iceberg': pred_test.reshape((pred_test.shape[0]))})
print(submission.head(10))
submission.to_csv(SUBMISSION, index=False)
print("submission saved")
# Stack all
wdir = './result/simplenet/'
stacked_1 = pd.read_csv(wdir + 'sub_simple_v1_0.csv')
stacked_2 = pd.read_csv(wdir + 'sub_simple_v1_1.csv')
import os
#import dill
import numpy as np
import pandas as pd
from Bio import SeqIO, Seq
import scipy.stats as st
import deepak.globals
import deepak.utilities
from deepak.library import MutationLibrary
from deepak.plot import replace_wt, all_correlations, make_heatmaps, make_fig_dir
pad = 948
target_T3 = ":917*ag"
target_G3 = ":932*ag"
target_T5 = ":50*ag"
target_G5 = ":41*ag"
# MAYBE: calculate common mutations, place in separate data structure
class Quantification:
"""
Class used to turn the Valid.csv output file into a pandas data frame suitable for plotting with sfmap or other
inspection. The resultant data frame has rows corresponding to integer positions in the sequence and columns
corresponding to amino acids.
"""
def __init__(self, config_file, lib_fn, reference_fn, pos):
self.config_file = config_file
self.library = MutationLibrary()
self.library.add_reference_fasta(reference_fn)
self.reference_AA = Seq.translate(self.library.reference)
self.library.construct(lib_fn, pos)
# get library info to create shape of DF
self.counts = None
self.edits = None
def configure(self, config_file):
with open(config_file) as config:
for line in config:
attribute, value = line.split()
def create_df(self):
lib_members = [translate_codon(item, self.library.reference) for item in self.library.keys() if item != "wt"]
start = min(lib_members, key=lambda x: x[0])[0]
end = max(lib_members, key=lambda x: x[0])[0]
self.counts = pd.DataFrame(np.zeros((1+end-start, 20)), index=range(start, end+1), columns=deepak.globals.AA_LIST)
self.edits = self.counts.copy()
def count_csv(self, csv, target):
data = pd.read_csv(csv, header=0, index_col=0)
wt_counts = 0
wt_edits = 0
for i, row in data.iterrows():
identity = row["lib_identity"]
if identity == "wt":
wt_counts += 1
if search_snp_paf(row["cs_tag"], target):
wt_edits += 1
else:
position, aa = translate_codon(identity, self.library.reference)
self.counts.loc[position, aa] += 1
if search_snp_paf(row["cs_tag"], target):
self.edits.loc[position, aa] += 1
self.tally_wt(wt_counts, wt_edits)
return
def tally_wt(self, counts, edits):
for i in self.counts.index:
aa = self.reference_AA[i]
self.counts.loc[i, aa] = counts
self.edits.loc[i, aa] = edits
return
def translate_codon(cs, reference):
""" Translates a cs string into a tuple in the form (position, amino_acid) """
fields = deepak.utilities.chunk_paf(cs)
position = int(fields[0][1:])
idx = position // 3
pad = position % 3
wt_codon = reference[3 * idx:3 * idx + 3]
codon = wt_codon
for item in fields[1:]:
if item[0] == ":":
pad += int(item[1])
continue
elif item[0] == "*":
assert wt_codon[pad] == item[1].upper()
codon = codon[:pad] + item[2].upper() + codon[1 + pad:]
pad += 1
else:
raise Exception("Invalid cs string")
return idx, Seq.translate(codon)
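# Illustrative walk-through, assuming deepak.utilities.chunk_paf(":30*ag") yields
# [":30", "*ag"]: position 30 maps to codon index 10 with offset 0, the wild-type
# codon is reference[30:33], its first base (asserted to be 'A') is swapped for 'G',
# and the translated mutant amino acid is returned as (10, aa).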
def load_pickled_data(fn):
with open(fn, mode="rb") as infile:
analysis = dill.load(infile)
return analysis
def search_snp_paf(paf_record, target):
target_fields = deepak.utilities.chunk_paf(target)
assert len(target_fields) == 2 # Should be ":n*{ref}{var}"
target_loc = int(target_fields[0][1:])
location = 0
fields = deepak.utilities.chunk_paf(paf_record)
for i, item in enumerate(fields):
if location == target_loc and item == target_fields[1]:
return True
elif item[0] == ":":
location += int(item[1:])
elif item[0] == "*":
location += 1
else:
raise Exception("Disallowed character in CS string, could be indel")
return False
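# Illustrative note: the cs strings parsed here follow the minimap2 cs-tag convention,
# where ":N" means N identical bases and "*xy" a substitution of reference base x by
# query base y. Assuming chunk_paf(":917*ag") yields [":917", "*ag"], the target
# ":917*ag" defined above reads as "917 matching bases, then an A->G substitution",
# so search_snp_paf(":917*ag", ":917*ag") is True while
# search_snp_paf(":920*ag", ":917*ag") is False.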
def detect_edits(item, target):
"""
Count reads in item which contain target mutation.
Returns the number of reads containing the target mutation and the total number of reads
*item* is a length 2 tuple comprising a library entry in the form (*name*, *list of PafRecords or cs strings*)
*target* is a cs string specifying the target mutation to search for
"""
name = item[0]
edits = list(map(search_snp_paf, item[1], [target] * len(item[1])))
return np.sum(edits), len(edits)
def decode_paf(paf_str):
global reference_fn, pad
ref = SeqIO.read(reference_fn, "fasta")
fields = deepak.utilities.chunk_paf(paf_str)
dna_loc = int(fields[0][1:])
pos = (dna_loc + pad) // 3
result_dna = ref[:dna_loc]
for mut in fields[1:]:
if mut.startswith("*"):
result_dna += mut[2]
dna_loc += 1
else:
n = int(mut[1:])
result_dna += ref[dna_loc:dna_loc + n]
dna_loc += n
if dna_loc < len(ref):
result_dna += ref[dna_loc:]
aa = result_dna.translate()[pos - (pad // 3)]
return int(pos), aa
def add_seq_info(data_frame):
positions, amino_acids = list(zip(*map(decode_paf, data_frame["name"])))
data_frame["position"] = positions
data_frame["amino_acid"] = amino_acids
return data_frame
def read_analysis(analysis_obj, target_mutation):
data = {"name": [], "edited_counts": [], "counts": []}
for member in analysis_obj.library.items():
edited, counts = detect_edits(member, target_mutation)
data["name"].append(member[0])
data["edited_counts"].append(edited)
data["counts"].append(counts)
df = pd.DataFrame(data)
wt = df.loc[df.name == "wt"]
df = df.loc[df.name != "wt"]
return df, wt
def z(p, n, wt_rate, wt_n, pooled=True, size=1):
if n < size:
return np.nan
if pooled:
combined_p = (wt_rate * wt_n + n * p) / (n + wt_n)
return (p - wt_rate) / np.sqrt(combined_p * (1 - combined_p) * ((1 / n) + (1 / wt_n)))
return (p - wt_rate) / np.sqrt((wt_rate * (1 - wt_rate) / wt_n) + (p * (1 - p) / n))
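# Illustrative sketch of z() with hypothetical numbers: a member edited in 50 of 100
# reads against a wild-type rate of 0.4 over 200 reads pools to
# p_hat = (0.4*200 + 0.5*100) / 300 ~= 0.433, giving
# z ~= (0.5 - 0.4) / sqrt(0.433 * 0.567 * (1/100 + 1/200)) ~= 1.65.
def _example_z_score():
    """Hypothetical usage sketch of the pooled two-proportion z-test above."""
    return z(p=0.5, n=100, wt_rate=0.4, wt_n=200)  # ~1.65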
def add_stats(df, wt_rate, wt_n):
n = 0
while True:
if "rep"+str(n)+"_counts" not in df.columns:
break
n += 1
x_bar = 1
for i in range(1, n):
rep = "rep"+str(i)+"_"
# Zero total counts results in NaN
p = df[rep+"counts"]/df["counts"]
# Members with zero counts in one replicate default to rate of other replicate, i.e. NaN ** 0 == 1
r = (df[rep+"edited_counts"]/df[rep+"counts"]).fillna(0)
x_bar *= np.power(r, p)
df["geom_editing_rate"] = x_bar
df["editing_rate"] = df["edited_counts"] / df["counts"]
df["z-score"] = list(map(z, df["editing_rate"], df["counts"], [wt_rate] * len(df.index), [wt_n] * len(df.index)))
df["p-value"] = st.norm.sf(np.abs(df["z-score"])) * 2 # two-tailed test
combined_p = (wt_rate * wt_n + df["editing_rate"] * df["counts"]) / (df["counts"] + wt_n)
df["std_error"] = np.sqrt(combined_p * (1 - combined_p) * ((1 / df["counts"]) + (1 / wt_n)))
return df
def reference_aa(df, reference):
start = df["position"].min()
end = df["position"].max()
ref = SeqIO.read(reference, "fasta")
wt_aa_seq = str(ref.translate()[int(start - pad // 3):int(end - pad // 3) + 1].seq)
return wt_aa_seq
def fill_aa_seq(df_seq, wt_aa_seq):
x = set(df_seq["position"])
least = min(x)
y = set(range(least, least+len(wt_aa_seq)))
z = x.difference(y)
while len(z) > 0:
item = z.pop()
        new_row = pd.DataFrame({"position": [item]*20, "amino_acid": deepak.globals.AA_LIST})
"""
@authors: <NAME> / <NAME>
goal: edf annotation reader
Modified: <NAME>, Stanford University, 2018
"""
import re
import numpy as np
import pandas as pd
import xmltodict
def read_edf_annotations(fname, annotation_format="edf/edf+"):
"""read_edf_annotations
Parameters:
-----------
fname : str
Path to file.
Returns:
--------
annot : DataFrame
The annotations
"""
with open(fname, 'r', encoding='utf-8',
errors='ignore') as annotions_file:
tal_str = annotions_file.read()
if "edf" in annotation_format:
if annotation_format == "edf/edf+":
exp = '(?P<onset>[+\-]\d+(?:\.\d*)?)' + \
'(?:\x15(?P<duration>\d+(?:\.\d*)?))?' + \
'(\x14(?P<description>[^\x00]*))?' + '(?:\x14\x00)'
elif annotation_format == "edf++":
exp = '(?P<onset>[+\-]\d+.\d+)' + \
'(?:(?:\x15(?P<duration>\d+.\d+)))' + \
'(?:\x14\x00|\x14(?P<description>.*?)\x14\x00)'
annot = [m.groupdict() for m in re.finditer(exp, tal_str)]
good_annot = pd.DataFrame(annot)
good_annot = good_annot.query('description != ""').copy()
good_annot.loc[:, 'duration'] = good_annot['duration'].astype(float)
good_annot.loc[:, 'onset'] = good_annot['onset'].astype(float)
elif annotation_format == "xml":
annot = xmltodict.parse(tal_str)
annot = annot['PSGAnnotation']["ScoredEvents"]["ScoredEvent"]
good_annot = pd.DataFrame(annot)
return good_annot
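# Illustrative note: the regular expressions above parse EDF+ time-stamped annotation
# lists (TALs) of the form "<onset>[\x15<duration>]\x14<description>\x14", with \x15
# separating onset from duration and \x14 terminating the description; those pieces
# are captured by the named groups 'onset', 'duration' and 'description'.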
def resample_30s(annot):
"""resample_30s: to resample annot dataframe when durations are multiple
of 30s
Parameters:
-----------
annot : pandas dataframe
the dataframe of annotations
Returns:
--------
annot : pandas dataframe
the resampled dataframe of annotations
"""
annot["start"] = annot.Start.values.astype(np.float).astype(np.int)
df_end = annot.iloc[[-1]].copy()
df_end['start'] += df_end['Duration'].values.astype(np.float)
df_end.index += 1
annot = annot.append(df_end)
annot = annot.set_index('start')
    annot.index = pd.to_timedelta(annot.index, unit='s')
import pandas as pd
import tensorflow as tf
from pathlib import Path
from datetime import datetime
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import load_model
#enviroment settings
path = Path(__file__).parent.absolute()/'Deep Training'
name_data = 'none_'#''
metric = 'binary_accuracy'
minimise = False
#parameter settings
model_keys = ['optimizer','layers','activations','dropouts']
blueprint_keys = ['predictors','identifier']+model_keys
#log settings
log_keys = ['timestamp']+blueprint_keys+['dimensions','length','nodes','loss',metric,'time','epochs']
sort_fields = [metric, 'loss', 'epochs', 'nodes', 'time']
sort_conditions = [minimise, True, True, True, True]
predictor_log_path = path/'Logs'/(name_data+'predictor_evaluation_log.csv')
parameter_log_path = path/'Logs'/(name_data+'parameter_evaluation_log.csv')
re_parameter_log_path = path/'Logs'/(name_data+'re_parameter_evaluation_log.csv')
#model settings
models_path = path/'Models'
#data settings
data_path = path/'Data'
targets_name = 'None_Targets.csv'
predictors_name = 'None_Predictors.csv'
targets_columns = ['Home: Win','Visiting: Win']
predictors_columns = None
targets_index = False
predictors_index = False
#data enviroment
targets = pd.read_csv(data_path/targets_name, usecols=targets_columns, index_col=targets_index)
import re
from pathlib import Path
import pandas as pd
import numpy as np
DATA_DIR = Path(__file__).parents[1] / 'data'
def load_uci():
"""Load data from http://archive.ics.uci.edu/ml/datasets/diabetes"""
data_path = str(DATA_DIR / 'public' / 'uci')
dfs = []
for p in Path(data_path).iterdir():
match = re.search(r'\d\d$', str(p))
if match:
df = pd.read_csv(
p,
sep='\t',
header=None,
names=['Date', 'Time', 'Code', 'Value'])
df['person_id'] = int(match[0])
dfs.append(df)
df = pd.concat(dfs)
code_map = {
33: 'Regular insulin dose',
34: 'NPH insulin dose',
35: 'UltraLente insulin dose',
48: 'Unspecified blood glucose measurement',
57: 'Unspecified blood glucose measurement',
58: 'Pre-breakfast blood glucose measurement',
59: 'Post-breakfast blood glucose measurement',
60: 'Pre-lunch blood glucose measurement',
61: 'Post-lunch blood glucose measurement',
62: 'Pre-supper blood glucose measurement',
63: 'Post-supper blood glucose measurement',
64: 'Pre-snack blood glucose measurement',
65: 'Hypoglycemic symptoms',
66: 'Typical meal ingestion',
67: 'More-than-usual meal ingestion',
68: 'Less-than-usual meal ingestion',
69: 'Typical exercise activity',
70: 'More-than-usual exercise activity',
71: 'Less-than-usual exercise activity',
72: 'Unspecified special event',
}
df['code_cat'] = df['Code'].map(code_map)
df['date_time'] = pd.to_datetime(
df['Date'] + ' ' + df['Time'],
format="%m-%d-%Y %H:%M",
errors='coerce')
df = df.drop(columns=['Date', 'Time'])
print(f"{sum(df['date_time'].isna())} records failed to convert to date")
df = df.loc[~df['date_time'].isna()]
    # drop entries logged at exactly 8AM, 12PM, 6PM, or 10PM
mask = (df['date_time'].dt.hour.isin([8, 12, 18, 22]) &
(df['date_time'].dt.minute == 0))
df = df.loc[~mask]
return df
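# Illustrative usage sketch: load_uci() returns one row per logged event with the
# numeric 'Code', its 'code_cat' label and a parsed 'date_time'; for example,
# pre-breakfast glucose readings could then be selected with df[df['Code'] == 58].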
def load_tidepool_dummy():
"""Assumed fake data from https://github.com/tidepool-org
Could be a useful data structure to emulate"""
data_path = ('https://raw.githubusercontent.com/tidepool-org/'
'data-analytics/master/examples/example-data/'
'example-from-j-jellyfish.csv')
return pd.read_csv(data_path)
def load_so_pump_raw():
data_path = str(DATA_DIR / 'private' / 'omnipod_pump' /
'omnipod_export_2019-04-13.TAB')
df = pd.read_csv(data_path, sep='\t', encoding='latin1')
cols = [
'DATEEVENT', 'TIMESLOT', 'EVENTTYPE', 'VENDOR_EVENT_ID', 'KEY0',
'KEY1', 'KEY2', 'I0', 'I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8',
'I9', 'D0', 'D1', 'D2', 'D3', 'D4', 'C0', 'C1', 'C2', 'COMMENT'
]
df = df[cols]
df['DATEEVENT'] = pd.to_datetime(
pd.to_numeric(df['DATEEVENT']) - 2, origin='1900-01-01',
unit="D").dt.round('s')
return df.sort_values('DATEEVENT').reset_index(drop=True)
def load_so_pump():
"""Data pulled by copy & paste from Abbott desktop app"""
data_path = str(DATA_DIR / 'private' / 'omnipod_pump' /
'omnipod_export_2019-04-13.csv')
df = pd.read_csv(data_path)
df.loc[df['Time'].isna(), 'Time'] = '12:00 AM'
df['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
df = df.drop(columns=['Date', 'Time'])
df[['Value', 'Unit',
'_']] = (df['Value'].str.replace(r'\(|\)', '').str.split(
' ', expand=True))
df = df.drop(columns=['_'])
df['Value'] = pd.to_numeric(df['Value'])
cols = [
'Datetime', 'Type', 'Value', 'Unit', 'Description', 'Other Info',
'Comment'
]
df = df[cols]
# drop daily summaries and pump alarms
df = df.loc[~df['Type'].isin(['Insulin Summary', 'Pump Alarm'])]
# fix NaN values for bolus
mask = (df['Value'].isna() &
df['Description'].str.lower().str.contains('bolus') == True)
df.loc[mask, 'Type'] = 'Bolus Insulin'
df.loc[mask, 'Value'] = (
df.loc[mask, 'Description'].str.extract(r'([0-9].[0-9][0-9])')[0])
# fix NaN values for basal
mask = (df['Value'].isna() &
((df['Description'].str.lower().str.contains('basal rate set to')
) == True))
df.loc[mask, 'Type'] = 'Basal Insulin'
df.loc[mask, 'Value'] = (
df.loc[mask, 'Description'].str.extract(r'([0-9].[0-9][0-9])')[0])
    df.loc[:, 'Value'] = pd.to_numeric(df['Value'])
"""
See also: test_reindex.py:TestReindexSetIndex
"""
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas._testing as tm
class TestSetIndex:
def test_set_index_multiindex(self):
# segfault in GH#3308
d = {"t1": [2, 2.5, 3], "t2": [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df["tuples"] = tuples
index = MultiIndex.from_tuples(df["tuples"])
# it works!
df.set_index(index)
def test_set_index_empty_column(self):
# GH#1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=["a", "m", "p", "x"],
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_index_empty_dataframe(self):
# GH#38419
df1 = DataFrame(
{"a": Series(dtype="datetime64[ns]"), "b": Series(dtype="int64"), "c": []}
)
df2 = df1.set_index(["a", "b"])
result = df2.index.to_frame().dtypes
expected = df1[["a", "b"]].dtypes
tm.assert_series_equal(result, expected)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_timezone(self):
# GH#12358
# tz-aware Series should retain the tz
idx = DatetimeIndex(["2014-01-01 10:10:10"], tz="UTC").tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]},
index=Index(di, name="index"),
)
exp.index = exp.index._with_freq(None)
tm.assert_frame_equal(res, exp)
# GH#12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"]
)
df = df.set_index(["A", "B"])
assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(["C", "D"])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
return_value = result.set_index(keys, drop=drop, inplace=True)
assert return_value is None
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH#1590
df = DataFrame({"val": [0, 1, 2], "key": ["a", "b", "c"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# plain == would give ambiguous Boolean error for containers
first_drop = (
False
if (
isinstance(keys[0], str)
and keys[0] == "A"
and isinstance(keys[1], str)
and keys[1] == "A"
)
else drop
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
        tm.assert_index_equal(idf.index, ci)
import math
import collections
import pandas as pd
from IPython.display import clear_output
def get_price_index(year_growth_rate, month):
return math.pow(1 + year_growth_rate/12, month)
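# Illustrative sketch: get_price_index() compounds a nominal yearly growth rate
# monthly, index(month) = (1 + rate/12) ** month, so a hypothetical 6% yearly rate
# gives roughly 1.005 ** 12 ~= 1.0617 after one year.
def _example_price_index():
    """Hypothetical usage sketch with made-up numbers."""
    return get_price_index(0.06, 12)  # ~1.0617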
Payment = collections.namedtuple('Payment', ['interest_amount',
'capital_downpayment'])
class Mortgage:
def __init__(self, interest_rate, mortgage_amount, maturity,
n_periods=12):
self.interest_rate = interest_rate
self.mortgage_amount = mortgage_amount
self.maturity = maturity
self.n_periods = n_periods
self.remaining_periods = maturity * n_periods
self.annuity_factor = self.get_annuity_factor()
self.monthly_payment = self.get_monthly_payment()
def get_annuity_factor(self):
interest_rate_amount_increase = (math.pow((1 + self.interest_rate
/ self.n_periods),
self.remaining_periods))
annuity_factor = ((interest_rate_amount_increase - 1)
/ ((self.interest_rate/self.n_periods)
* interest_rate_amount_increase))
return annuity_factor
def get_monthly_payment(self):
self.annuity_factor = self.get_annuity_factor()
return self.mortgage_amount / self.annuity_factor
def get_interest_amount(self):
return self.mortgage_amount * (self.interest_rate / self.n_periods)
def get_capital_downpayment(self):
interest_amount = self.get_interest_amount()
return self.monthly_payment - interest_amount
def update_interest_rate(self, new_interest_rate):
self.interest_rate = new_interest_rate
self.annuity_factor = self.get_annuity_factor()
new_monthly_payment = self.get_monthly_payment()
self.update_monthly_payment_amount(new_monthly_payment)
return self
def update_monthly_payment_amount(self, new_monthly_payment):
self.monthly_payment = new_monthly_payment
return self
def get_next_payment(self):
if self.mortgage_amount > 0:
interest_amount = self.get_interest_amount()
capital_downpayment_amount = self.get_capital_downpayment()
payment = Payment(interest_amount,
capital_downpayment_amount)
self.remaining_periods -= 1
self.mortgage_amount -= capital_downpayment_amount
self.mortgage_amount = max(self.mortgage_amount, 0)
else:
payment = Payment(0, 0)
self.remaining_periods -= 1
return payment
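# Illustrative usage sketch with hypothetical figures: a 25-year loan of 300,000 at a
# 3% nominal annual rate has a constant monthly payment of
# mortgage_amount / annuity_factor (~1423 here); the first payment splits into
# 300_000 * 0.03 / 12 == 750.0 of interest, with the remainder going to capital.
def _example_mortgage_payment():
    """Hypothetical usage sketch of the Mortgage class above."""
    mortgage = Mortgage(interest_rate=0.03, mortgage_amount=300_000, maturity=25)
    first = mortgage.get_next_payment()
    return mortgage.monthly_payment, first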
class RealEstate:
def __init__(self, purchase_price, mortgage):
self.purchase_price = purchase_price
self.current_price = purchase_price
self.mortgage = mortgage
self.history = {'mortgage_amount': [],
'price_index': [],
'current_price': [],
'interest_amount': [],
'capital_downpayment': []}
def tick_month(self, price_index):
self.history['mortgage_amount'].append(self.mortgage.mortgage_amount)
self.history['price_index'].append(price_index)
current_price = self.purchase_price * price_index
self.history['current_price'].append(current_price)
current_payment = self.mortgage.get_next_payment()._asdict()
for key in current_payment:
self.history[key].append(current_payment[key])
class StockPurchase:
def __init__(self, amount, purchase_price):
self.amount = amount
self.purchase_price = purchase_price
self.current_price = purchase_price
self.units = self.amount / self.purchase_price
self.value = self.units * self.current_price
def update_value(self, current_price):
self.current_price = current_price
self.value = self.units * self.current_price
return self
def to_dict(self):
return {'invested_amount': self.amount,
'purchase_price_stocks': self.purchase_price,
'current_price': self.current_price,
'value_stocks': self.value}
class Portfolio:
def __init__(self):
self.purchases = []
def purchase(self, amount, purchase_price):
self.purchases.append(StockPurchase(amount, purchase_price))
return self
def update_values(self, current_price):
for purchase in self.purchases:
purchase.update_value(current_price)
return self
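# Illustrative note with hypothetical numbers: Portfolio models dollar-cost averaging;
# each purchase() records the units bought at that month's price, and update_values()
# marks every past purchase to the current price, e.g.
# Portfolio().purchase(1000, 50.0).purchase(1000, 40.0).update_values(60.0)
# leaves values of 1200.0 (20 units) and 1500.0 (25 units).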
class Scenario:
def __init__(self, real_estate, portfolio, growth_rate_real_estate,
growth_rate_stocks, initial_price_stocks,
mortgage_overpayment_amount, investment_amount, name=None):
self.real_estate = real_estate
self.portfolio = portfolio
self.growth_rate_real_estate = growth_rate_real_estate
self.growth_rate_stocks = growth_rate_stocks
self.initial_price_stocks = initial_price_stocks
self.mortgage_overpayment_amount = mortgage_overpayment_amount
self.investment_amount = investment_amount
if name is None:
self.name = '{}_{}_{}_{}'.format(self.growth_rate_real_estate,
self.growth_rate_stocks,
self.mortgage_overpayment_amount,
self.investment_amount)
else:
self.name = name
self.history = None
        self.profit_real_estate = 0
self.profit_stocks = 0
def update_profit(self):
self.profit_real_estate = (self.history['current_price_real_est'].iloc[-1]
- self.history['current_price_real_est'].iloc[0]
- self.history['interest_amount'].sum())
self.profit_stocks = self.history['profit_stocks'].sum()
def update_history(self):
real_estate_hist = pd.DataFrame(self.real_estate.history)
portfolio_purchases = [row.to_dict()
for row in self.portfolio.purchases]
portfolio_hist = pd.DataFrame.from_records(portfolio_purchases).drop('invested_amount', axis=1)
self.history = real_estate_hist.join(portfolio_hist,
lsuffix='_real_est',
rsuffix='_stocks')
self.history['scenario_name'] = self.name
self.history['growth_rate_real_estate'] = self.growth_rate_real_estate
self.history['growth_rate_stocks'] = self.growth_rate_stocks
self.history['mortgage_overpayment_amount'] = self.mortgage_overpayment_amount
self.history['investment_amount'] = self.investment_amount
self.history['month'] = [i for i in range(len(self.history))]
self.history['profit_stocks'] = (self.history['value_stocks']
- self.history['investment_amount'])
self.history['cumulative_interest_amount'] = self.history['interest_amount'].cumsum()
self.history['cumulative_profit_stocks'] = self.history['profit_stocks'].cumsum()
self.history['current_profit_real_estate'] =\
([self.history['current_price_real_est'].iloc[i]
- self.history['current_price_real_est'].iloc[0]
- self.history['cumulative_interest_amount'].iloc[i]
for i in range(len(self.history))])
self.history['profit_ratio'] = (self.history['current_profit_real_estate']
/ self.history['cumulative_profit_stocks'])
self.history['profit_diff'] = (self.history['current_profit_real_estate']
- self.history['cumulative_profit_stocks'])
return self
def run(self):
month = 0
maturity = self.real_estate.mortgage.maturity
n_periods = self.real_estate.mortgage.n_periods
max_periods = maturity * n_periods
new_monthly_payment_amount = (self.real_estate.mortgage.monthly_payment
+ self.mortgage_overpayment_amount)
self.real_estate\
.mortgage.update_monthly_payment_amount(new_monthly_payment_amount)
while month <= max_periods:
real_estate_price_index = get_price_index(self.growth_rate_real_estate, month)
self.real_estate.tick_month(real_estate_price_index)
stock_price = (get_price_index(self.growth_rate_stocks, month)
* self.initial_price_stocks)
self.portfolio.purchase(self.investment_amount, stock_price)
self.portfolio.update_values(stock_price)
month += 1
self.update_history()
self.update_profit()
class Simulation:
def __init__(self, scenarios):
self.scenarios = scenarios
        self.history = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
from sklearn import preprocessing
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
import joblib
# import numpy as np
# In[2]:
df = pd.read_csv("data/data500.csv")
import numpy as np
import pandas as pd
import sqlite3
class DataTransformation:
"""Performs data loading and data transformation
"""
def __init__(self, url:str) -> None:
try:
self.__data = pd.read_csv(url, sep=";")
self.__transform()
except Exception as error:
raise error("There was a problem loading the file", error)
def __transform(self) -> None:
column_names = {"fecha_nacimiento":"birth_date", "fecha_vencimiento":"due_date",
"deuda":"due_balance","direccion":"address",
"correo":"email", "estatus_contacto":"status",
"deuda":"due_balance","prioridad":"priority", "telefono":"phone"}
try:
self.__data = self.__data.rename(column_names, axis=1)
#self.__data[['birth_date','due_date']] = self.__data[['birth_date','due_date']].apply(pd.to_datetime, format="%Y-%m-%d")
self.__data['due_date'] = pd.to_datetime(self.__data['due_date'])
            self.__data['birth_date'] = pd.to_datetime(self.__data['birth_date'])
import pandas as pd
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
import os
count = 0
reg = SGDRegressor()
predict_for = "NANOUSD.csv"
batch_size = "30T"
stop = pd.to_datetime("2020-08-01", format="%Y-%m-%d")
for pair_csv in os.listdir("../../../bots/bitsurfer/data"):
pair_path = "../../../bots/bitsurfer/data/" + pair_csv
df = pd.read_csv(pair_path, sep=",", names=["ts", "price", "vol"])
df["datetime"] = pd.to_datetime(df["ts"], unit="s")
df = df.set_index("datetime")
df_test = df[df.index >= stop]
df = df[df.index < stop]
if df.empty:
continue
# df = df_test
# train df
ts_df = df[["ts"]].resample(batch_size).mean()
price_df = df[["price"]].resample(batch_size).mean().fillna(0)
vol_df = df[["vol"]].resample(batch_size).sum().fillna(0)
resampled_df = pd.DataFrame(index=ts_df.index)
resampled_df["price"] = price_df["price"].values / max(price_df["price"].values)
resampled_df["vol"] = vol_df["vol"].values / max(vol_df["vol"].values)
resampled_df["price_t-1"] = resampled_df.shift(1)["price"]
resampled_df["price_t-2"] = resampled_df.shift(2)["price"]
resampled_df["vol_t-1"] = resampled_df.shift(1)["vol"]
resampled_df["vol_t-2"] = resampled_df.shift(2)["vol"]
resampled_df["target"] = resampled_df.shift(-1)["price"]
resampled_df = resampled_df.loc[(resampled_df[["price", "vol"]] != 0).any(axis=1)]
resampled_df = resampled_df.loc[
(resampled_df[["price_t-1", "vol_t-1"]] != 0).any(axis=1)
]
resampled_df = resampled_df.loc[
(resampled_df[["price_t-2", "vol_t-2"]] != 0).any(axis=1)
]
resampled_df = resampled_df.loc[(resampled_df[["target"]] != 0).any(axis=1)]
resampled_df = resampled_df.dropna()
# test df
if pair_csv == predict_for:
ts_df = df_test[["ts"]].resample(batch_size).mean()
price_df = df_test[["price"]].resample(batch_size).mean().fillna(0)
vol_df = df_test[["vol"]].resample(batch_size).sum().fillna(0)
        resampled_test_df = pd.DataFrame(index=ts_df.index)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#import networkx as nx
from scIB.utils import *
from scIB.preprocessing import score_cell_cycle
from scIB.clustering import opt_louvain
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from scipy.io import mmwrite
import sklearn
import sklearn.metrics
from time import time
import cProfile
from pstats import Stats
import memory_profiler
import itertools
import multiprocessing
import subprocess
import tempfile
import pathlib
from os import mkdir, path, remove, stat
import gc
import rpy2.rinterface_lib.callbacks
import logging
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Ignore R warning messages
import rpy2.robjects as ro
import anndata2ri
# Define Errors
class RootCellError(Exception):
def __init__(self, message):
self.message = message
class NeighborsError(Exception):
def __init__(self, message):
self.message = message
### Silhouette score
def silhouette(adata, group_key, metric='euclidean', embed='X_pca', scale=True):
"""
    Wrapper around the sklearn silhouette score. Values range from [-1, 1], with 1 being an
    ideal fit, 0 indicating overlapping clusters and -1 indicating misclassified cells.
"""
if embed not in adata.obsm.keys():
print(adata.obsm.keys())
raise KeyError(f'{embed} not in obsm')
asw = sklearn.metrics.silhouette_score(adata.obsm[embed], adata.obs[group_key], metric=metric)
if scale:
asw = (asw + 1)/2
return asw
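# Illustrative, self-contained usage sketch with random data (the AnnData object and
# labels below are hypothetical):
def _example_silhouette():
    """Hypothetical usage sketch of the silhouette() wrapper above."""
    import anndata
    rng = np.random.RandomState(0)
    toy = anndata.AnnData(rng.normal(size=(60, 20)))
    toy.obsm['X_pca'] = rng.normal(size=(60, 10))
    toy.obs['cell_type'] = ['A'] * 30 + ['B'] * 30
    return silhouette(toy, group_key='cell_type')  # scaled to [0, 1] by default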
def silhouette_batch(adata, batch_key, group_key, metric='euclidean',
embed='X_pca', verbose=True, scale=True):
"""
Silhouette score of batch labels subsetted for each group.
params:
batch_key: batches to be compared against
group_key: group labels to be subsetted by e.g. cell type
metric: see sklearn silhouette score
embed: name of column in adata.obsm
returns:
all scores: absolute silhouette scores per group label
group means: if `mean=True`
"""
if embed not in adata.obsm.keys():
print(adata.obsm.keys())
raise KeyError(f'{embed} not in obsm')
sil_all = pd.DataFrame(columns=['group', 'silhouette_score'])
for group in adata.obs[group_key].unique():
adata_group = adata[adata.obs[group_key] == group]
if adata_group.obs[batch_key].nunique() == 1:
continue
sil_per_group = sklearn.metrics.silhouette_samples(adata_group.obsm[embed], adata_group.obs[batch_key],
metric=metric)
# take only absolute value
sil_per_group = [abs(i) for i in sil_per_group]
if scale:
# scale s.t. highest number is optimal
sil_per_group = [1 - i for i in sil_per_group]
d = pd.DataFrame({'group' : [group]*len(sil_per_group), 'silhouette_score' : sil_per_group})
sil_all = sil_all.append(d)
sil_all = sil_all.reset_index(drop=True)
sil_means = sil_all.groupby('group').mean()
if verbose:
print(f'mean silhouette per cell: {sil_means}')
return sil_all, sil_means
def plot_silhouette_score(adata_dict, batch_key, group_key, metric='euclidean',
embed='X_pca', palette='Dark2', per_group=False, verbose=True):
"""
params:
adata_dict: dictionary of adata objects, each labeled by e.g. integration method name
"""
with sns.color_palette(palette):
for label, adata in adata_dict.items():
checkAdata(adata)
            sil_scores, _ = silhouette_batch(adata,
                                             batch_key=batch_key,
                                             group_key=group_key,
                                             metric=metric,
                                             embed=embed,
                                             verbose=verbose)
sns.distplot(sil_scores['silhouette_score'], label=label, hist=False)
plt.title('Silhouette scores per cell for all groups')
plt.show()
if per_group:
for data_set, adata in adata_dict.items():
            sil_scores, _ = silhouette_batch(adata,
                                             batch_key=batch_key,
                                             group_key=group_key,
                                             metric=metric,
                                             embed=embed,
                                             verbose=verbose)
# plot for all groups
for group in sil_scores['group'].unique():
group_scores = sil_scores[sil_scores['group'] == group]
sns.distplot(group_scores['silhouette_score'], label=group, hist=False)
plt.title(f'Silhouette scores per cell for {data_set}')
plt.show()
### NMI normalised mutual information
def nmi(adata, group1, group2, method="arithmetic", nmi_dir=None):
"""
Normalized mutual information NMI based on 2 different cluster assignments `group1` and `group2`
params:
adata: Anndata object
group1: column name of `adata.obs` or group assignment
group2: column name of `adata.obs` or group assignment
method: NMI implementation
'max': scikit method with `average_method='max'`
'min': scikit method with `average_method='min'`
'geometric': scikit method with `average_method='geometric'`
'arithmetic': scikit method with `average_method='arithmetic'`
'Lancichinetti': implementation by <NAME> 2009 et al.
'ONMI': implementation by <NAME> et al. (https://github.com/aaronmcdaid/Overlapping-NMI) Hurley 2011
nmi_dir: directory of compiled C code if 'Lancichinetti' or 'ONMI' are specified as `method`. Compilation should be done as specified in the corresponding README.
return:
normalized mutual information (NMI)
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
# choose method
if method in ['max', 'min', 'geometric', 'arithmetic']:
nmi_value = sklearn.metrics.normalized_mutual_info_score(group1, group2, average_method=method)
elif method == "Lancichinetti":
nmi_value = nmi_Lanc(group1, group2, nmi_dir=nmi_dir)
elif method == "ONMI":
nmi_value = onmi(group1, group2, nmi_dir=nmi_dir)
else:
raise ValueError(f"Method {method} not valid")
return nmi_value
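# Illustrative note: for the scikit-learn based methods, nmi() reduces to
# sklearn.metrics.normalized_mutual_info_score on the two label vectors, e.g.
# normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0], average_method='arithmetic')
# is 1.0, since NMI only compares the partitions, not the label names.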
def onmi(group1, group2, nmi_dir=None, verbose=True):
"""
Based on implementation https://github.com/aaronmcdaid/Overlapping-NMI
publication: <NAME>, <NAME>, <NAME> 2011
params:
nmi_dir: directory of compiled C code
"""
if nmi_dir is None:
raise FileNotFoundError("Please provide the directory of the compiled C code from https://sites.google.com/site/andrealancichinetti/mutual3.tar.gz")
group1_file = write_tmp_labels(group1, to_int=False)
group2_file = write_tmp_labels(group2, to_int=False)
nmi_call = subprocess.Popen(
[nmi_dir+"onmi", group1_file, group2_file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = nmi_call.communicate()
if stderr:
print(stderr)
nmi_out = stdout.decode()
if verbose:
print(nmi_out)
nmi_split = [x.strip().split('\t') for x in nmi_out.split('\n')]
nmi_max = float(nmi_split[0][1])
# remove temporary files
remove(group1_file)
remove(group2_file)
return nmi_max
def nmi_Lanc(group1, group2, nmi_dir="external/mutual3/", verbose=True):
"""
paper by <NAME> 2009
https://sites.google.com/site/andrealancichinetti/mutual
recommended by Malte
"""
if nmi_dir is None:
raise FileNotFoundError("Please provide the directory of the compiled C code from https://sites.google.com/site/andrealancichinetti/mutual3.tar.gz")
group1_file = write_tmp_labels(group1, to_int=False)
group2_file = write_tmp_labels(group2, to_int=False)
nmi_call = subprocess.Popen(
[nmi_dir+"mutual", group1_file, group2_file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = nmi_call.communicate()
if stderr:
print(stderr)
nmi_out = stdout.decode().strip()
return float(nmi_out.split('\t')[1])
def write_tmp_labels(group_assignments, to_int=False, delim='\n'):
"""
write the values of a specific obs column into a temporary file in text format
needed for external C NMI implementations (onmi and nmi_Lanc functions), because they require files as input
params:
to_int: rename the unique column entries by integers in range(1,len(group_assignments)+1)
"""
if to_int:
label_map = {}
i = 1
for label in set(group_assignments):
label_map[label] = i
i += 1
labels = delim.join([str(label_map[name]) for name in group_assignments])
else:
labels = delim.join([str(name) for name in group_assignments])
clusters = {label:[] for label in set(group_assignments)}
for i, label in enumerate(group_assignments):
clusters[label].append(str(i))
output = '\n'.join([' '.join(c) for c in clusters.values()])
output = str.encode(output)
# write to file
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(output)
filename = f.name
return filename
### ARI adjusted rand index
def ari(adata, group1, group2):
"""
params:
adata: anndata object
group1: ground-truth cluster assignments (e.g. cell type labels)
group2: "predicted" cluster assignments
The function is symmetric, so group1 and group2 can be switched
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
return sklearn.metrics.cluster.adjusted_rand_score(group1, group2)
### Isolated label score
def isolated_labels(adata, label_key, batch_key, cluster_key="iso_cluster",
cluster=True, n=None, all_=False, verbose=True):
"""
score how well labels of isolated labels are distiguished in the dataset by
1. clustering-based approach
2. silhouette score
params:
cluster: if True, use clustering approach, otherwise use silhouette score approach
n: max number of batches per label for label to be considered as isolated.
if n is integer, consider labels that are present for n batches as isolated
if n=None, consider minimum number of batches that labels are present in
all_: return scores for all isolated labels instead of aggregated mean
return:
by default, mean of scores for each isolated label
retrieve dictionary of scores for each label if `all_` is specified
"""
scores = {}
isolated_labels = get_isolated_labels(adata, label_key, batch_key, cluster_key, n=n, verbose=verbose)
for label in isolated_labels:
score = score_isolated_label(adata, label_key, cluster_key, label, cluster=cluster, verbose=verbose)
scores[label] = score
if all_:
return scores
return np.mean(list(scores.values()))
def get_isolated_labels(adata, label_key, batch_key, cluster_key, n, verbose):
"""
get labels that are considered isolated by the number of batches
"""
tmp = adata.obs[[label_key, batch_key]].drop_duplicates()
batch_per_lab = tmp.groupby(label_key).agg({batch_key: "count"})
# threshold for determining when label is considered isolated
if n is None:
n = batch_per_lab.min().tolist()[0]
if verbose:
print(f"isolated labels: no more than {n} batches per label")
labels = batch_per_lab[batch_per_lab[batch_key] <= n].index.tolist()
if len(labels) == 0 and verbose:
print(f"no isolated labels with less than {n} batches")
return labels
def score_isolated_label(adata, label_key, cluster_key, label, cluster=True, verbose=False, **kwargs):
"""
compute label score for a single label
params:
cluster: if True, use clustering approach, otherwise use silhouette score approach
"""
adata_tmp = adata.copy()
def max_label_per_batch(adata, label_key, cluster_key, label, argmax=False):
"""cluster optimizing over cluster with largest number of isolated label per batch"""
sub = adata.obs[adata.obs[label_key] == label].copy()
label_counts = sub[cluster_key].value_counts()
if argmax:
return label_counts.index[label_counts.argmax()]
return label_counts.max()
def max_f1(adata, label_key, cluster_key, label, argmax=False):
"""cluster optimizing over largest F1 score of isolated label"""
obs = adata.obs
max_cluster = None
max_f1 = 0
for cluster in obs[cluster_key].unique():
y_pred = obs[cluster_key] == cluster
y_true = obs[label_key] == label
f1 = sklearn.metrics.f1_score(y_pred, y_true)
if f1 > max_f1:
max_f1 = f1
max_cluster = cluster
if argmax:
return max_cluster
return max_f1
if cluster:
opt_louvain(adata_tmp, label_key, cluster_key, function=max_f1, label=label, verbose=False, inplace=True)
score = max_f1(adata_tmp, label_key, cluster_key, label, argmax=False)
else:
adata_tmp.obs['group'] = adata_tmp.obs[label_key] == label
score = silhouette(adata_tmp, group_key='group', **kwargs)
del adata_tmp
if verbose:
print(f"{label}: {score}")
return score
def precompute_hvg_batch(adata, batch, features, n_hvg=500, save_hvg=False):
adata_list = splitBatches(adata, batch, hvg=features)
hvg_dir = {}
for i in adata_list:
sc.pp.filter_genes(i, min_cells=1)
n_hvg_tmp = np.minimum(n_hvg, int(0.5*i.n_vars))
if n_hvg_tmp<n_hvg:
print(i.obs[batch][0]+' has less than the specified number of genes')
print('Number of genes: '+str(i.n_vars))
hvg = sc.pp.highly_variable_genes(i, flavor='cell_ranger', n_top_genes=n_hvg_tmp, inplace=False)
hvg_dir[i.obs[batch][0]] = i.var.index[hvg['highly_variable']]
adata_list=None
if save_hvg:
adata.uns['hvg_before']=hvg_dir
else:
return hvg_dir
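# Example usage (illustrative sketch; `adata_raw` and `hvg` are placeholder names for the
# unintegrated object and the feature set used for integration):
#
#   precompute_hvg_batch(adata_raw, batch='batch', features=hvg, n_hvg=500, save_hvg=True)
#   # stores the per-batch HVG lists in adata_raw.uns['hvg_before']; hvg_overlap() below can
#   # then reuse them (when the gene sets match) instead of recomputing them.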
### Highly Variable Genes conservation
def hvg_overlap(adata_pre, adata_post, batch, n_hvg=500):
hvg_post = adata_post.var_names
adata_post_list = splitBatches(adata_post, batch)
overlap = []
if ('hvg_before' in adata_pre.uns_keys()) and (set(hvg_post) == set(adata_pre.var_names)):
print('Using precomputed hvgs per batch')
hvg_pre_list = adata_pre.uns['hvg_before']
else:
hvg_pre_list = precompute_hvg_batch(adata_pre, batch, hvg_post)
for i in range(len(adata_post_list)):#range(len(adata_pre_list)):
sc.pp.filter_genes(adata_post_list[i], min_cells=1) # remove genes unexpressed (otherwise hvg might break)
#ov = list(set(adata_pre_list[i].var_names).intersection(set(hvg_pre_list[i])))
#adata_pre_list[i] = adata_pre_list[i][:,ov]
#adata_post_list[i] = adata_post_list[i][:,ov]
batch_var = adata_post_list[i].obs[batch][0]
n_hvg_tmp = len(hvg_pre_list[batch_var])#adata_pre.uns['n_hvg'][hvg_post]#np.minimum(n_hvg, int(0.5*adata_post_list[i].n_vars))
print(n_hvg_tmp)
#if n_hvg_tmp<n_hvg:
# print(adata_post_list[i].obs[batch][0]+' has less than the specified number of genes')
# print('Number of genes: '+str(adata_post_list[i].n_vars))
#hvg_pre = sc.pp.highly_variable_genes(adata_pre_list[i], flavor='cell_ranger', n_top_genes=n_hvg_tmp, inplace=False)
tmp_pre = hvg_pre_list[batch_var] #adata_pre_list[i].var.index[hvg_pre['highly_variable']]
hvg_post = sc.pp.highly_variable_genes(adata_post_list[i], flavor='cell_ranger', n_top_genes=n_hvg_tmp, inplace=False)
tmp_post = adata_post_list[i].var.index[hvg_post['highly_variable']]
n_hvg_real = np.minimum(len(tmp_pre),len(tmp_post))
overlap.append((len(set(tmp_pre).intersection(set(tmp_post))))/n_hvg_real)
return np.mean(overlap)
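# Example usage (illustrative sketch; `adata_unint`/`adata_int` are placeholder names for the
# unintegrated and integrated objects restricted to the same genes):
#
#   hvg_score = hvg_overlap(adata_unint, adata_int, batch='batch', n_hvg=500)
#   # mean per-batch overlap of highly variable genes, between 0 and 1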
### Cell cycle effect
def precompute_cc_score(adata, batch_key, organism='mouse',
n_comps=50, verbose=False):
batches = adata.obs[batch_key].cat.categories
scores_before = {}
s_score = []
g2m_score = []
for batch in batches:
raw_sub = adata[adata.obs[batch_key] == batch].copy()
#score cell cycle if not already done
if (np.in1d(['S_score', 'G2M_score'], adata.obs_keys()).sum() < 2):
score_cell_cycle(raw_sub, organism=organism)
s_score.append(raw_sub.obs['S_score'])
g2m_score.append(raw_sub.obs['G2M_score'])
covariate = raw_sub.obs[['S_score', 'G2M_score']]
before = pc_regression(raw_sub.X, covariate, pca_sd=None, n_comps=n_comps, verbose=verbose)
scores_before.update({batch : before})
if (np.in1d(['S_score', 'G2M_score'], adata.obs_keys()).sum() < 2):
adata.obs['S_score'] = pd.concat(s_score)
adata.obs['G2M_score'] = pd.concat(g2m_score)
adata.uns['scores_before'] = scores_before
return
def cell_cycle(adata_pre, adata_post, batch_key, embed=None, agg_func=np.mean,
organism='mouse', n_comps=50, verbose=False):
"""
Compare the variance contribution of S-phase and G2/M-phase cell cycle scores before and
after integration. Cell cycle scores are computed per batch on the unintegrated data set,
    eliminating the batch effect confounded by the `batch_key` variable. This function
    returns a score between 0 and 1. The larger the score, the better the cell cycle
    variance is conserved.
This score can be calculated on full corrected feature spaces and latent embeddings as
variance contributions of a fixed score can be obtained via PC regression here.
params:
adata_pre, adata_post: adatas before and after integration
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
agg_func: any function that takes a list of numbers and aggregates them into a single number.
If `agg_func=None`, all results will be returned
organism: 'mouse' or 'human' for choosing cell cycle genes
"""
checkAdata(adata_pre)
checkAdata(adata_post)
if embed == 'X_pca':
embed = None
batches = adata_pre.obs[batch_key].unique()
scores_final = []
scores_before = []
scores_after = []
#if both (s-score, g2m-score) and pc-regression are pre-computed
if (np.in1d(['S_score', 'G2M_score'],
adata_pre.obs_keys()).sum() == 2) and ('scores_before' in adata_pre.uns_keys()):
#extract needed infos from adata_pre and delete it from memory
df_pre = adata_pre.obs[['S_score', 'G2M_score', batch_key]]
scores_precomp = pd.Series(adata_pre.uns['scores_before'])
del adata_pre
n_item = gc.collect()
for batch in enumerate(batches):
raw_sub = df_pre.loc[df_pre[batch_key] == batch[1]]
int_sub = adata_post[adata_post.obs[batch_key] == batch[1]].copy()
int_sub = int_sub.obsm[embed] if embed is not None else int_sub.X
if raw_sub.shape[0] != int_sub.shape[0]:
message = f'batch "{batch[1]}" of batch_key "{batch_key}" '
                message += 'has unequal number of entries before and after integration. '
message += f'before: {raw_sub.shape[0]} after: {int_sub.shape[0]}'
raise ValueError(message)
if verbose:
print("score cell cycle")
covariate = raw_sub[['S_score', 'G2M_score']]
after = pc_regression(int_sub, covariate, pca_sd=None, n_comps=n_comps, verbose=verbose)
scores_after.append(after)
#get score before from list of pre-computed scores
before = scores_precomp[batch[1]]
scores_before.append(before)
score = 1 - abs(after - before)/before # scaled result
if score < 0:
# Here variance contribution becomes more than twice as large as before
if verbose:
print("Variance contrib more than twice as large after integration.")
print("Setting score to 0.")
score = 0
scores_final.append(score)
if verbose:
print(f"batch: {batch[1]}\t before: {before}\t after: {after}\t score: {score}")
else: #not everything is pre-computed
for batch in batches:
raw_sub = adata_pre[adata_pre.obs[batch_key] == batch]
int_sub = adata_post[adata_post.obs[batch_key] == batch]
int_sub = int_sub.obsm[embed] if embed is not None else int_sub.X
if raw_sub.shape[0] != int_sub.shape[0]:
message = f'batch "{batch}" of batch_key "{batch_key}" '
message += 'has unequal number of entries before and after integration.'
message += f'before: {raw_sub.shape[0]} after: {int_sub.shape[0]}'
raise ValueError(message)
if verbose:
print("score cell cycle")
#compute cell cycle score if not done already
if (np.in1d(['S_score', 'G2M_score'], raw_sub.obs_keys()).sum() < 2):
score_cell_cycle(raw_sub, organism=organism)
covariate = raw_sub.obs[['S_score', 'G2M_score']]
before = pc_regression(raw_sub.X, covariate, pca_sd=None, n_comps=n_comps, verbose=verbose)
scores_before.append(before)
after = pc_regression(int_sub, covariate, pca_sd=None, n_comps=n_comps, verbose=verbose)
scores_after.append(after)
score = 1 - abs(after - before)/before # scaled result
if score < 0:
# Here variance contribution becomes more than twice as large as before
if verbose:
print("Variance contrib more than twice as large after integration.")
print("Setting score to 0.")
score = 0
scores_final.append(score)
if verbose:
print(f"batch: {batch}\t before: {before}\t after: {after}\t score: {score}")
if agg_func is None:
        return pd.DataFrame({'batch': list(batches), 'before': scores_before,
                             'after': scores_after, 'score': scores_final})
else:
return agg_func(scores_final)
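# Example usage (illustrative sketch; 'X_emb' is only an example embedding key in
# adata_post.obsm):
#
#   cc_mean = cell_cycle(adata_pre, adata_post, batch_key='batch', embed='X_emb',
#                        organism='mouse', agg_func=np.mean)
#   cc_table = cell_cycle(adata_pre, adata_post, batch_key='batch', embed='X_emb',
#                         agg_func=None)   # per-batch before/after/score values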
### PC Regression
def pcr_comparison(adata_pre, adata_post, covariate, embed=None, n_comps=50, scale=True, verbose=False):
"""
Compare the effect before and after integration
Return either the difference of variance contribution before and after integration
    or a score between 0 and 1 (`scale=True`) with 0 if the variance contribution hasn't
changed. The larger the score, the more different the variance contributions are before
and after integration.
params:
adata_pre: uncorrected adata
adata_post: integrated adata
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
scale: if True, return scaled score
return:
difference of R2Var value of PCR
"""
if embed == 'X_pca':
embed = None
pcr_before = pcr(adata_pre, covariate=covariate, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
pcr_after = pcr(adata_post, covariate=covariate, embed=embed, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
if scale:
score = (pcr_before - pcr_after)/pcr_before
if score < 0:
print("Variance contribution increased after integration!")
print("Setting PCR comparison score to 0.")
score = 0
return score
else:
return pcr_after - pcr_before
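# Example usage (illustrative sketch): compare how much variance the batch covariate explains
# before vs. after integration; with scale=True, 1 means the batch signal was fully removed
# and 0 means it did not change.
#
#   batch_removal = pcr_comparison(adata_pre, adata_post, covariate='batch',
#                                  embed='X_emb', n_comps=50, scale=True)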
def pcr(adata, covariate, embed=None, n_comps=50, recompute_pca=True, verbose=False):
"""
PCR for Adata object
Checks whether to
+ compute PCA on embedding or expression data (set `embed` to name of embedding matrix e.g. `embed='X_emb'`)
+ use existing PCA (only if PCA entry exists)
+ recompute PCA on expression matrix (default)
params:
adata: Anndata object
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
n_comps: number of PCs if PCA should be computed
covariate: key for adata.obs column to regress against
return:
R2Var of PCR
"""
checkAdata(adata)
checkBatch(covariate, adata.obs)
if verbose:
print(f"covariate: {covariate}")
batch = adata.obs[covariate]
# use embedding for PCA
if (embed is not None) and (embed in adata.obsm):
if verbose:
print(f"compute PCR on embedding n_comps: {n_comps}")
return pc_regression(adata.obsm[embed], batch, n_comps=n_comps)
# use existing PCA computation
elif (recompute_pca == False) and ('X_pca' in adata.obsm) and ('pca' in adata.uns):
if verbose:
print("using existing PCA")
return pc_regression(adata.obsm['X_pca'], batch, pca_sd=adata.uns['pca']['variance'])
# recompute PCA
else:
if verbose:
print(f"compute PCA n_comps: {n_comps}")
return pc_regression(adata.X, batch, n_comps=n_comps)
def pc_regression(data, covariate, pca_sd=None, n_comps=50, svd_solver='arpack', verbose=False):
"""
params:
data: expression or PCA matrix. Will be assumed to be PCA values, if pca_sd is given
        covariate: series or list of batch assignments
        n_comps: number of PCA components for computing PCA, only when pca_sd is not given. If no pca_sd is given and n_comps=None, compute PCA and don't reduce data
pca_sd: iterable of variances for `n_comps` components. If `pca_sd` is not `None`, it is assumed that the matrix contains PCA values, else PCA is computed
PCA is only computed, if variance contribution is not given (pca_sd).
"""
if isinstance(data, (np.ndarray, sparse.csr_matrix)):
matrix = data
else:
raise TypeError(f'invalid type: {data.__class__} is not a numpy array or sparse matrix')
# perform PCA if no variance contributions are given
if pca_sd is None:
if n_comps is None or n_comps > min(matrix.shape):
n_comps = min(matrix.shape)
if n_comps == min(matrix.shape):
svd_solver = 'full'
if verbose:
print("compute PCA")
pca = sc.tl.pca(matrix, n_comps=n_comps, use_highly_variable=False,
return_info=True, svd_solver=svd_solver, copy=True)
X_pca = pca[0].copy()
pca_sd = pca[3].copy()
del pca
else:
X_pca = matrix
n_comps = matrix.shape[1]
## PC Regression
if verbose:
print("fit regression on PCs")
# one-hot encode categorical values
covariate = pd.get_dummies(covariate).to_numpy()
# fit linear model for n_comps PCs
r2 = []
for i in range(n_comps):
pc = X_pca[:, [i]]
lm = sklearn.linear_model.LinearRegression()
lm.fit(pc, covariate)
r2_score = lm.score(pc, covariate)
#pred = lm.predict(pc)
#r2_score = scm.r2_score(pred, covariate, multioutput='uniform_average')
#print(r2_score)
#print(pred)
#print(covariate)
r2.append(r2_score)
Var = pca_sd**2 / sum(pca_sd**2) * 100
R2Var = sum(r2*Var)/100
return R2Var
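# Example usage (illustrative sketch with synthetic data): R2Var is the variance-weighted
# fraction of PC variance explained by the covariate, so values lie between 0 and 1.
#
#   X = np.random.rand(500, 30)
#   covariate = ['batch1'] * 250 + ['batch2'] * 250
#   r2var = pc_regression(X, covariate, n_comps=20)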
### lisi score
def get_hvg_indices(adata, verbose=True):
if "highly_variable" not in adata.var.columns:
if verbose:
print(f"No highly variable genes computed, continuing with full matrix {adata.shape}")
return np.array(range(adata.n_vars))
return np.where((adata.var["highly_variable"] == True))[0]
def select_hvg(adata, select=True):
if select and 'highly_variable' in adata.var:
return adata[:, adata.var['highly_variable']].copy()
else:
return adata
### diffusion for connectivites matrix extension
def diffusion_conn(adata, min_k=50, copy=True, max_iterations=26):
'''
This function performs graph diffusion on the connectivities matrix until a
minimum number `min_k` of entries per row are non-zero.
Note:
    Due to self-loops, min_k-1 non-zero connectivities entries is actually the stopping
    criterion. This is equivalent to `sc.pp.neighbors`.
    Returns:
       The diffusion-enhanced connectivities matrix, or a copy of the AnnData object
       with the diffusion-enhanced connectivities matrix stored in
       `adata.uns["neighbors"]["diffusion_connectivities"]`
'''
if 'neighbors' not in adata.uns:
raise ValueError('`neighbors` not in adata object. '
'Please compute a neighbourhood graph!')
if 'connectivities' not in adata.uns['neighbors']:
raise ValueError('`connectivities` not in `adata.uns["neighbors"]`. '
'Please pass an object with connectivities computed!')
T = adata.uns['neighbors']['connectivities']
#Normalize T with max row sum
# Note: This keeps the matrix symmetric and ensures |M| doesn't keep growing
T = sparse.diags(1/np.array([T.sum(1).max()]*T.shape[0]))*T
M = T
# Check for disconnected component
n_comp, labs = connected_components(adata.uns['neighbors']['connectivities'],
connection='strong')
if n_comp > 1:
tab = pd.value_counts(labs)
small_comps = tab.index[tab<min_k]
large_comp_mask = np.array(~pd.Series(labs).isin(small_comps))
else:
large_comp_mask = np.array([True]*M.shape[0])
T_agg = T
i = 2
while ((M[large_comp_mask,:][:,large_comp_mask]>0).sum(1).min() < min_k) and (i < max_iterations):
print(f'Adding diffusion to step {i}')
T_agg *= T
M += T_agg
i+=1
if (M[large_comp_mask,:][:,large_comp_mask]>0).sum(1).min() < min_k:
        raise ValueError('could not create diffusion connectivities matrix '
                         f'with at least {min_k} non-zero entries in '
                         f'{max_iterations} iterations.\n Please increase the '
                         'value of max_iterations or reduce min_k.\n')
M.setdiag(0)
if copy:
adata_tmp = adata.copy()
adata_tmp.uns['neighbors'].update({'diffusion_connectivities': M})
return adata_tmp
else:
return M
### diffusion neighbourhood score
def diffusion_nn(adata, k, max_iterations=26):
'''
This function generates a nearest neighbour list from a connectivities matrix
as supplied by BBKNN or Conos. This allows us to select a consistent number
of nearest neighbours across all methods.
Return:
`k_indices` a numpy.ndarray of the indices of the k-nearest neighbors.
'''
if 'neighbors' not in adata.uns:
raise ValueError('`neighbors` not in adata object. '
'Please compute a neighbourhood graph!')
if 'connectivities' not in adata.uns['neighbors']:
raise ValueError('`connectivities` not in `adata.uns["neighbors"]`. '
'Please pass an object with connectivities computed!')
T = adata.uns['neighbors']['connectivities']
# Row-normalize T
T = sparse.diags(1/T.sum(1).A.ravel())*T
T_agg = T**3
M = T+T**2+T_agg
i = 4
while ((M>0).sum(1).min() < (k+1)) and (i < max_iterations):
#note: k+1 is used as diag is non-zero (self-loops)
print(f'Adding diffusion to step {i}')
T_agg *= T
M += T_agg
i+=1
if (M>0).sum(1).min() < (k+1):
        raise NeighborsError(f'could not find {k} nearest neighbors in {max_iterations} '
                             'diffusion steps.\n Please increase max_iterations or reduce '
                             'k.\n')
M.setdiag(0)
k_indices = np.argpartition(M.A, -k, axis=1)[:, -k:]
return k_indices
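# Example usage (illustrative sketch; requires a neighbourhood graph, e.g. from
# sc.pp.neighbors or BBKNN, stored in adata.uns['neighbors']):
#
#   k_idx = diffusion_nn(adata, k=50)   # (n_obs, 50) array of neighbour indices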
def lisi_knn(adata, batch_key, label_key, perplexity=None, verbose=False):
"""
Deprecated
Compute LISI score on kNN graph provided in the adata object. By default, perplexity
is chosen as 1/3 * number of nearest neighbours in the knn-graph.
"""
if 'neighbors' not in adata.uns:
raise AttributeError(f"key 'neighbors' not found. Please make sure that a " +
"kNN graph has been computed")
elif verbose:
print("using precomputed kNN graph")
#get knn index matrix
if verbose:
print("Convert nearest neighbor matrix and distances for LISI.")
dist_mat = sparse.find(adata.uns['neighbors']['distances'])
#get number of nearest neighbours parameter
if 'params' not in adata.uns['neighbors']:
        #estimate the number of nearest neighbors as the minimum
        #number of neighbours reported per cell
_, e = np.unique(dist_mat[0], return_counts=True)
n_nn = np.nanmin(e)
n_nn = n_nn.astype('int')
else:
n_nn = adata.uns['neighbors']['params']['n_neighbors']-1
nn_index = np.empty(shape=(adata.uns['neighbors']['distances'].shape[0],
n_nn))
nn_dists = np.empty(shape=(adata.uns['neighbors']['distances'].shape[0],
n_nn))
index_out = []
for cell_id in np.arange(np.min(dist_mat[0]), np.max(dist_mat[0])+1):
get_idx = dist_mat[0] == cell_id
num_idx = get_idx.sum()
#in case that get_idx contains more than n_nn neighbours, cut away the outlying ones
#potential enhancement: handle case where less than n_nn neighbours are reported
if num_idx >= n_nn:
nn_index[cell_id,:] = dist_mat[1][get_idx][np.argsort(dist_mat[2][get_idx])][:n_nn]
nn_dists[cell_id,:] = np.sort(dist_mat[2][get_idx])[:n_nn]
else:
index_out.append(cell_id)
out_cells = len(index_out)
if out_cells > 0:
#remove all indexes in nn_index and nn_dists, which are 0
#COMMENT: Terrible idea and commented out
#nn_dists = np.delete(nn_dists, index_out, 0)
#nn_index = np.delete(nn_index, index_out, 0)
if verbose:
print(f"{out_cells} had less than {n_nn} neighbors and were omitted in LISI score.")
if perplexity is None:
# use LISI default
perplexity = np.floor(nn_index.shape[1]/3)
# run LISI in R
anndata2ri.activate()
ro.r("library(lisi)")
if verbose:
print("importing knn-graph")
ro.globalenv['nn_indx'] = nn_index.astype('int').T
ro.globalenv['nn_dst'] = nn_dists.T
ro.globalenv['perplexity'] = perplexity
ro.globalenv['batch'] = adata.obs[batch_key].cat.codes.values
ro.globalenv['n_batches'] = len(np.unique(adata.obs[batch_key]))
ro.globalenv['label'] = adata.obs[label_key].cat.codes.values
ro.globalenv['n_labels'] = len(np.unique(adata.obs[label_key]))
if verbose:
print("LISI score estimation")
simpson_estimate_batch = ro.r(f"simpson.estimate_batch <- compute_simpson_index(nn_dst, nn_indx, batch, n_batches, perplexity)") #batch_label_keys)")
simpson_estimate_label = ro.r(f"simpson.estimate_label <- compute_simpson_index(nn_dst, nn_indx, label, n_labels, perplexity)") #batch_label_keys)")
simpson_est_batch = 1/np.squeeze(ro.r("simpson.estimate_batch"))
simpson_est_label = 1/np.squeeze(ro.r("simpson.estimate_label"))
anndata2ri.deactivate()
# extract results
d = {batch_key : simpson_est_batch, label_key : simpson_est_label}
lisi_estimate = pd.DataFrame(data=d, index=np.arange(0,len(simpson_est_label)))
return lisi_estimate
#LISI core functions (which we want to implement in cython for speed)
def Hbeta(D_row, beta):
"""
Helper function for simpson index computation
"""
P = np.exp(- D_row * beta)
sumP = np.nansum(P)
if (sumP == 0):
H = 0
P = np.zeros(len(D_row))
else:
H = np.log(sumP) + beta * np.nansum(D_row*P) / sumP
P /= sumP
return H, P
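# Intuition (sketch): Hbeta turns a row of neighbour distances into probabilities
# P ~ exp(-beta * D_row) (normalised to sum to 1) and returns their Shannon entropy H.
# compute_simpson_index() below tunes beta until exp(H) matches the requested perplexity.
#
#   H, P = Hbeta(np.array([0.1, 0.2, 0.4]), beta=1.0)
#   # P sums to 1; a larger beta concentrates P on the closest neighbours.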
#helper function for LISI
def convertToOneHot(vector, num_classes=None):
"""
Converts an input 1-D vector of integers into an output
2-D array of one-hot vectors, where an i'th input value
of j will set a '1' in the i'th row, j'th column of the
output array.
Example:
v = np.array((1, 0, 4))
one_hot_v = convertToOneHot(v)
print one_hot_v
[[0 1 0 0 0]
[1 0 0 0 0]
[0 0 0 0 1]]
"""
#assert isinstance(vector, np.ndarray)
#assert len(vector) > 0
if num_classes is None:
num_classes = np.max(vector)+1
#else:
# assert num_classes > 0
# assert num_classes >= np.max(vector)
result = np.zeros(shape=(len(vector), num_classes))
result[np.arange(len(vector)), vector] = 1
return result.astype(int)
#LISI core functions (which we want to implement in cython for speed)
def compute_simpson_index(D = None, knn_idx = None, batch_labels = None, n_batches = None,
perplexity = 15, tol = 1e-5):
"""
Simpson index of batch labels subsetted for each group.
params:
D: distance matrix n_cells x n_nearest_neighbors
knn_idx: index of n_nearest_neighbors of each cell
batch_labels: a vector of length n_cells with batch info
n_batches: number of unique batch labels
perplexity: effective neighborhood size
tol: a tolerance for testing effective neighborhood size
returns:
simpson: the simpson index for the neighborhood of each cell
"""
n = D.shape[0]
P = np.zeros(D.shape[1])
simpson = np.zeros(n)
logU = np.log(perplexity)
#loop over all cells
for i in np.arange(0, n, 1):
beta = 1
# negative infinity
betamin = -np.inf
# positive infinity
betamax = np.inf
#get active row of D
D_act = D[i,:]
H, P = Hbeta(D_act, beta)
Hdiff = H - logU
tries = 0
#first get neighbor probabilities
while (np.logical_and(np.abs(Hdiff) > tol, tries < 50)):
if (Hdiff > 0):
betamin = beta
if (betamax == np.inf):
beta *= 2
else:
beta = (beta + betamax) / 2
else:
betamax = beta
if (betamin== -np.inf):
beta /= 2
else:
beta = (beta + betamin) / 2
H, P = Hbeta(D_act, beta)
Hdiff = H - logU
tries += 1
if (H == 0):
simpson[i] = -1
continue
#then compute Simpson's Index
non_nan_knn = knn_idx[i][np.invert(np.isnan(knn_idx[i]))].astype('int')
batch = batch_labels[non_nan_knn]
#convertToOneHot omits all nan entries.
#Therefore, we run into errors in np.matmul.
if len(batch) == len(P):
B = convertToOneHot(batch, n_batches)
sumP = np.matmul(P,B) #sum P per batch
simpson[i] = np.dot(sumP, sumP) #sum squares
else: #assign worst possible score
simpson[i] = 1
return simpson
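# Example usage (illustrative sketch; `nn_dists`, `nn_index` and `batch_codes` are placeholder
# names): the per-cell Simpson index lies between 1/n_batches (perfectly mixed neighbourhood)
# and 1 (a single batch); LISI is its reciprocal, as done in lisi_knn_py() below.
#
#   simpson = compute_simpson_index(D=nn_dists, knn_idx=nn_index,
#                                   batch_labels=batch_codes, n_batches=3)
#   lisi_per_cell = 1 / simpson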
def lisi_knn_py(adata, batch_key, label_key, perplexity=None, verbose=False):
"""
Compute LISI score on kNN graph provided in the adata object. By default, perplexity
is chosen as 1/3 * number of nearest neighbours in the knn-graph.
"""
if 'neighbors' not in adata.uns:
raise AttributeError(f"key 'neighbors' not found. Please make sure that a " +
"kNN graph has been computed")
elif verbose:
print("using precomputed kNN graph")
#get knn index matrix
if verbose:
print("Convert nearest neighbor matrix and distances for LISI.")
dist_mat = sparse.find(adata.uns['neighbors']['distances'])
#get number of nearest neighbours parameter
if 'params' not in adata.uns['neighbors']:
        #estimate the number of nearest neighbors as the median
        #number of neighbours reported per cell
_, e = np.unique(dist_mat[0], return_counts=True)
n_nn = np.nanmedian(e)
n_nn = n_nn.astype('int')
else:
n_nn = adata.uns['neighbors']['params']['n_neighbors']-1
#initialise index and fill it with NaN values
nn_index = np.empty(shape=(adata.uns['neighbors']['distances'].shape[0],
n_nn))
nn_index[:] = np.NaN
nn_dists = np.empty(shape=(adata.uns['neighbors']['distances'].shape[0],
n_nn))
nn_dists[:] = np.NaN
index_out = []
for cell_id in np.arange(np.min(dist_mat[0]), np.max(dist_mat[0])+1):
get_idx = dist_mat[0] == cell_id
num_idx = get_idx.sum()
#in case that get_idx contains more than n_nn neighbours, cut away the outlying ones
fin_idx = np.min([num_idx, n_nn])
nn_index[cell_id,:fin_idx] = dist_mat[1][get_idx][np.argsort(dist_mat[2][get_idx])][:fin_idx]
nn_dists[cell_id,:fin_idx] = np.sort(dist_mat[2][get_idx])[:fin_idx]
if num_idx < n_nn:
index_out.append(cell_id)
out_cells = len(index_out)
if out_cells > 0:
if verbose:
print(f"{out_cells} had less than {n_nn} neighbors.")
if perplexity is None:
# use LISI default
perplexity = np.floor(nn_index.shape[1]/3)
# run LISI in python
if verbose:
print("importing knn-graph")
batch = adata.obs[batch_key].cat.codes.values
n_batches = len(np.unique(adata.obs[batch_key]))
label = adata.obs[label_key].cat.codes.values
n_labels = len(np.unique(adata.obs[label_key]))
if verbose:
print("LISI score estimation")
simpson_estimate_batch = compute_simpson_index(D = nn_dists,
knn_idx = nn_index,
batch_labels = batch,
n_batches = n_batches,
perplexity = perplexity,
)
simpson_estimate_label = compute_simpson_index(D = nn_dists,
knn_idx = nn_index,
batch_labels = label,
n_batches = n_labels,
perplexity = perplexity
)
simpson_est_batch = 1/simpson_estimate_batch
simpson_est_label = 1/simpson_estimate_label
# extract results
d = {batch_key : simpson_est_batch, label_key : simpson_est_label}
lisi_estimate = pd.DataFrame(data=d, index=np.arange(0,len(simpson_est_label)))
return lisi_estimate
def lisi_matrix(adata, batch_key, label_key, matrix=None, verbose=False):
"""
deprecated
Computes the LISI scores for a given data matrix in adata.X. The scoring function of the
LISI R package is called with default parameters. This function takes a data matrix and
recomputes nearest neighbours.
"""
if matrix is None:
matrix = adata.X
#lisi score runs only on dense matrices (knn search)
if sparse.issparse(matrix):
matrix = matrix.todense()
# run LISI in R
anndata2ri.activate()
ro.r("library(lisi)")
if verbose:
print("importing expression matrix")
ro.globalenv['data_mtrx'] = matrix
if verbose:
print(f"covariates: {batch_key} and {label_key}")
metadata = adata.obs[[batch_key, label_key]]
ro.globalenv['metadata'] = metadata
batch_label_keys = ro.StrVector([batch_key, label_key])
ro.globalenv['batch_label_keys'] = batch_label_keys
if verbose:
print("LISI score estimation")
lisi_estimate = ro.r(f"lisi.estimate <- compute_lisi(data_mtrx, metadata, batch_label_keys)") #batch_label_keys)")
anndata2ri.deactivate()
return lisi_estimate
def lisi(adata, batch_key, label_key, k0=90, type_= None, scale=True, verbose=False):
"""
Compute lisi score (after integration)
params:
        adata: anndata object to calculate on
        batch_key: variable to compute iLISI on
        label_key: variable to compute cLISI on
return:
pd.DataFrame with median cLISI and median iLISI scores (following the harmony paper)
"""
checkAdata(adata)
checkBatch(batch_key, adata.obs)
checkBatch(label_key, adata.obs)
#if type_ != 'knn':
# if verbose:
# print("recompute kNN graph with {k0} nearest neighbors.")
#recompute neighbours
if (type_ == 'embed'):
adata_tmp = sc.pp.neighbors(adata,n_neighbors=k0, use_rep = 'X_emb', copy=True)
elif (type_ == 'full'):
if 'X_pca' not in adata.obsm.keys():
sc.pp.pca(adata, svd_solver = 'arpack')
adata_tmp = sc.pp.neighbors(adata, n_neighbors=k0, copy=True)
else:
adata_tmp = adata.copy()
#if knn - do not compute a new neighbourhood graph (it exists already)
#lisi_score = lisi_knn(adata=adata, batch_key=batch_key, label_key=label_key, verbose=verbose)
lisi_score = lisi_knn_py(adata=adata_tmp, batch_key=batch_key, label_key=label_key, verbose=verbose)
# iLISI: nbatches good, 1 bad
ilisi_score = np.nanmedian(lisi_score[batch_key])
# cLISI: 1 good, nbatches bad
clisi_score = np.nanmedian(lisi_score[label_key])
if scale:
#get number of batches
nbatches = len(np.unique(adata.obs[batch_key]))
ilisi_score, clisi_score = scale_lisi(ilisi_score, clisi_score, nbatches)
return ilisi_score, clisi_score
#LISI core function for shortest paths
def compute_simpson_index_graph(input_path = None,
batch_labels = None, n_batches = None, n_neighbors = 90,
perplexity = 30, chunk_no = 0,tol = 1e-5):
"""
Simpson index of batch labels subsetted for each group.
params:
input_path: file_path to pre-computed index and distance files
batch_labels: a vector of length n_cells with batch info
n_batches: number of unique batch labels
n_neighbors: number of nearest neighbors
perplexity: effective neighborhood size
chunk_no: for parallelisation, chunk id to evaluate
tol: a tolerance for testing effective neighborhood size
returns:
simpson: the simpson index for the neighborhood of each cell
"""
#initialize
P = np.zeros(n_neighbors)
logU = np.log(perplexity)
if chunk_no is None:
chunk_no = 0
#check if the target file is not empty
if stat(input_path + '_indices_'+ str(chunk_no) + '.txt').st_size == 0:
print("File has no entries. Doing nothing.")
lists = np.zeros(0)
return lists
#read distances and indices with nan value handling
indices = pd.read_csv(input_path + '_indices_'+ str(chunk_no) + '.txt',
header= None,sep='\n')
indices = indices[0].str.split(',', expand=True)
indices.set_index(keys=0, drop=True, inplace=True) #move cell index to DF index
indices = indices.T
distances = pd.read_csv(input_path + '_distances_'+ str(chunk_no) + '.txt',
header= None, sep='\n')
distances = distances[0].str.split(',', expand=True)
distances.set_index(keys=0, drop=True, inplace=True) #move cell index to DF index
distances = distances.T
#get cell ids
chunk_ids = indices.columns.values.astype('int')
#define result vector
simpson = np.zeros(len(chunk_ids))
#loop over all cells in chunk
for i in enumerate(chunk_ids):
#get neighbors and distances
#read line i from indices matrix
get_col = indices[str(i[1])]
if get_col.isnull().sum()>0:
#not enough neighbors
            print(str(i[1]) + " does not have enough neighbors.")
simpson[i[0]] = 1 # np.nan #set nan for testing
continue
else:
knn_idx = get_col.astype('int') -1 #get 0-based indexing
#read line i from distances matrix
D_act = distances[str(i[1])].values.astype('float')
#start lisi estimation
beta = 1
# negative infinity
betamin = -np.inf
# positive infinity
betamax = np.inf
H, P = Hbeta(D_act, beta)
Hdiff = H - logU
tries = 0
#first get neighbor probabilities
while (np.logical_and(np.abs(Hdiff) > tol, tries < 50)):
if (Hdiff > 0):
betamin = beta
if (betamax == np.inf):
beta *= 2
else:
beta = (beta + betamax) / 2
else:
betamax = beta
if (betamin == -np.inf):
beta /= 2
else:
beta = (beta + betamin) / 2
H, P = Hbeta(D_act, beta)
Hdiff = H - logU
tries += 1
if (H == 0):
simpson[i[0]] = -1
continue
#then compute Simpson's Index
batch = batch_labels[knn_idx]
B = convertToOneHot(batch, n_batches)
sumP = np.matmul(P,B) #sum P per batch
simpson[i[0]] = np.dot(sumP, sumP) #sum squares
return simpson
#function to prepare call of compute_simpson_index
def lisi_graph_py(adata, batch_key, n_neighbors = 90, perplexity=None, subsample = None,
multiprocessing = None, nodes = None, verbose=False):
"""
    Compute the LISI score on shortest paths based on the kNN graph provided in the adata object.
By default, perplexity is chosen as 1/3 * number of nearest neighbours in the knn-graph.
"""
if 'neighbors' not in adata.uns:
raise AttributeError(f"key 'neighbors' not found. Please make sure that a " +
"kNN graph has been computed")
elif verbose:
print("using precomputed kNN graph")
#get knn index matrix
if verbose:
print("Convert nearest neighbor matrix and distances for LISI.")
batch = adata.obs[batch_key].cat.codes.values
n_batches = len(np.unique(adata.obs[batch_key]))
if perplexity is None or perplexity >=n_neighbors:
# use LISI default
perplexity = np.floor(n_neighbors/3)
#setup subsampling
subset = 100 #default, no subsampling
if subsample is not None:
        subset = subsample #use the requested subsampling percentage
if isinstance(subsample, int) == False: #need to set as integer
subset = int(subsample)
# run LISI in python
if verbose:
print("Compute knn on shortest paths")
#define number of chunks
n_chunks = 1
if multiprocessing is not None:
#set up multiprocessing
if nodes is None:
            #take all but one CPU, or 1 CPU if there's only one available
            n_cpu = multiprocessing.cpu_count()
            n_processes = np.max([n_cpu - 1, 1]).astype('int')
else:
n_processes = nodes
        #update number of chunks
n_chunks = n_processes
#create temporary directory
dir_path = "/tmp/lisi_tmp"+str(int(time()))
while path.isdir(dir_path):
dir_path += '2'
dir_path += '/'
mkdir(dir_path)
#write to temporary directory
mtx_file_path = dir_path + 'input.mtx'
mmwrite(mtx_file_path,
adata.uns['neighbors']['connectivities'],
symmetry='general')
# call knn-graph computation in Cpp
root = pathlib.Path(__file__).parent #get current root directory
cpp_file_path = root / 'knn_graph/knn_graph.o' #create POSIX path to file to execute compiled cpp-code
#comment: POSIX path needs to be converted to string - done below with 'as_posix()'
#create evenly split chunks if n_obs is divisible by n_chunks (doesn't really make sense on 2nd thought)
n_splits = n_chunks -1
args_int = [cpp_file_path.as_posix(), mtx_file_path, dir_path, str(n_neighbors), str(n_splits), str(subset)]
subprocess.run(args_int)
if verbose:
print("LISI score estimation")
#do the simpson call
if multiprocessing is not None:
if verbose:
print(f"{n_processes} processes started.")
pool = multiprocessing.Pool(processes=n_processes)
count = np.arange(0, n_processes)
#create argument list for each worker
results = pool.starmap(compute_simpson_index_graph, zip(itertools.repeat(dir_path),
itertools.repeat(batch),
itertools.repeat(n_batches),
itertools.repeat(n_neighbors),
itertools.repeat(perplexity),
count))
pool.close()
pool.join()
simpson_est_batch = 1/np.concatenate(results)
else:
simpson_estimate_batch = compute_simpson_index_graph(input_path = dir_path,
batch_labels = batch,
n_batches = n_batches,
perplexity = perplexity,
n_neighbors = n_neighbors,
chunk_no = None
)
simpson_est_batch = 1/simpson_estimate_batch
# extract results
d = {batch_key : simpson_est_batch}
lisi_estimate = pd.DataFrame(data=d, index=np.arange(0,len(simpson_est_batch)))
return lisi_estimate
#LISI graph function (analogous to the lisi function)
def lisi_graph(adata, batch_key=None, label_key=None, k0=90, type_= None,
subsample = None, scale=True,
multiprocessing = None, nodes = None, verbose=False):
"""
Compute lisi score (after integration)
params:
adata: adata object to calculate on
batch_key: variable to compute iLISI on
label_key: variable to compute cLISI on
k0: number of nearest neighbors to compute lisi score
Please note that the initial neighborhood size that is
used to compute shortest paths is 15.
type_: type of data integration, either knn, full or embed
subsample: Percentage of observations (integer between 0 and 100)
to which lisi scoring should be subsampled
scale: scale output values (True/False)
        multiprocessing: parallel computation of LISI scores, if None, no parallelisation
via multiprocessing is performed
nodes: number of nodes (i.e. CPUs to use for multiprocessing); ignored, if
multiprocessing is set to None
return:
pd.DataFrame with median cLISI and median iLISI scores
(following the harmony paper)
"""
checkAdata(adata)
checkBatch(batch_key, adata.obs)
checkBatch(label_key, adata.obs)
#recompute neighbours
if (type_ == 'embed'):
adata_tmp = sc.pp.neighbors(adata,n_neighbors=15, use_rep = 'X_emb', copy=True)
    elif (type_ == 'full'):
if 'X_pca' not in adata.obsm.keys():
sc.pp.pca(adata, svd_solver = 'arpack')
adata_tmp = sc.pp.neighbors(adata, n_neighbors=15, copy=True)
else:
adata_tmp = adata.copy()
#if knn - do not compute a new neighbourhood graph (it exists already)
#compute LISI score
    ilisi_score = lisi_graph_py(adata = adata_tmp, batch_key = batch_key,
                  n_neighbors = k0, perplexity=None, subsample = subsample,
                  multiprocessing = multiprocessing, nodes = nodes, verbose=verbose)
    clisi_score = lisi_graph_py(adata = adata_tmp, batch_key = label_key,
                  n_neighbors = k0, perplexity=None, subsample = subsample,
                  multiprocessing = multiprocessing, nodes = nodes, verbose=verbose)
# iLISI: nbatches good, 1 bad
ilisi_score = np.nanmedian(ilisi_score)
# cLISI: 1 good, nbatches bad
clisi_score = np.nanmedian(clisi_score)
if scale:
#get number of batches
nbatches = len(np.unique(adata.obs[batch_key]))
ilisi_score, clisi_score = scale_lisi(ilisi_score, clisi_score, nbatches)
return ilisi_score, clisi_score
def scale_lisi(ilisi_score, clisi_score, nbatches):
#scale iLISI score to 0 bad 1 good
ilisi_score = (ilisi_score - 1)/(nbatches-1)
#scale clisi score to 0 bad 1 good
clisi_score = (nbatches - clisi_score)/(nbatches-1)
return ilisi_score, clisi_score
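# Example (sketch): with 4 batches, a raw iLISI of 4 (perfect mixing) and a raw cLISI of 1
# (perfect label separation) both scale to 1:
#
#   scale_lisi(ilisi_score=4, clisi_score=1, nbatches=4)   # -> (1.0, 1.0)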
### kBET
def kBET_single(matrix, batch, type_ = None, k0 = 10, knn=None, subsample=0.5, heuristic=True, verbose=False):
"""
params:
        matrix: expression matrix (at the moment: a PCA matrix, so do.pca is set to FALSE)
        batch: series or list of batch assignments
subsample: fraction to be subsampled. No subsampling if `subsample=None`
returns:
kBET p-value
"""
anndata2ri.activate()
ro.r("library(kBET)")
if verbose:
print("importing expression matrix")
ro.globalenv['data_mtrx'] = matrix
ro.globalenv['batch'] = batch
#print(matrix.shape)
#print(len(batch))
if verbose:
print("kBET estimation")
#k0 = len(batch) if len(batch) < 50 else 'NULL'
ro.globalenv['knn_graph'] = knn
ro.globalenv['k0'] = k0
batch_estimate = ro.r(f"batch.estimate <- kBET(data_mtrx, batch, knn=knn_graph, k0=k0, plot=FALSE, do.pca=FALSE, heuristic=FALSE, adapt=FALSE, verbose={str(verbose).upper()})")
anndata2ri.deactivate()
try:
ro.r("batch.estimate$average.pval")[0]
except rpy2.rinterface_lib.embedded.RRuntimeError:
return np.nan
else:
return ro.r("batch.estimate$average.pval")[0]
def kBET(adata, batch_key, label_key, embed='X_pca', type_ = None,
hvg=False, subsample=0.5, heuristic=False, verbose=False):
"""
Compare the effect before and after integration
params:
        adata: anndata object to compute kBET on
return:
pd.DataFrame with kBET p-values per cluster for batch
"""
checkAdata(adata)
checkBatch(batch_key, adata.obs)
checkBatch(label_key, adata.obs)
#compute connectivities for non-knn type data integrations
#and increase neighborhoods for knn type data integrations
if type_ != 'knn':
adata_tmp = sc.pp.neighbors(adata, n_neighbors = 50, use_rep=embed, copy=True)
else:
#check if pre-computed neighbours are stored in input file
adata_tmp = adata.copy()
if 'diffusion_connectivities' not in adata.uns['neighbors']:
if verbose:
print(f"Compute: Diffusion neighbours.")
adata_tmp = diffusion_conn(adata, min_k = 50, copy = True)
adata_tmp.uns['neighbors']['connectivities'] = adata_tmp.uns['neighbors']['diffusion_connectivities']
if verbose:
print(f"batch: {batch_key}")
#set upper bound for k0
size_max = 2**31 - 1
kBET_scores = {'cluster': [], 'kBET': []}
for clus in adata_tmp.obs[label_key].unique():
adata_sub = adata_tmp[adata_tmp.obs[label_key] == clus,:].copy()
#check if neighborhood size too small or only one batch in subset
if np.logical_or(adata_sub.n_obs < 10,
len(adata_sub.obs[batch_key].cat.categories)==1):
print(f"{clus} consists of a single batch or is too small. Skip.")
score = np.nan
else:
quarter_mean = np.floor(np.mean(adata_sub.obs[batch_key].value_counts())/4).astype('int')
k0 = np.min([70, np.max([10, quarter_mean])])
#check k0 for reasonability
if (k0*adata_sub.n_obs) >=size_max:
k0 = np.floor(size_max/adata_sub.n_obs).astype('int')
matrix = np.zeros(shape=(adata_sub.n_obs, k0+1))
if verbose:
print(f"Use {k0} nearest neighbors.")
n_comp, labs = connected_components(adata_sub.uns['neighbors']['connectivities'],
connection='strong')
if n_comp > 1:
#check the number of components where kBET can be computed upon
comp_size = pd.value_counts(labs)
#check which components are small
comp_size_thresh = 3*k0
idx_nonan = np.flatnonzero(np.in1d(labs,
comp_size[comp_size>=comp_size_thresh].index))
#check if 75% of all cells can be used for kBET run
if len(idx_nonan)/len(labs) >= 0.75:
#create another subset of components, assume they are not visited in a diffusion process
adata_sub_sub = adata_sub[idx_nonan,:].copy()
nn_index_tmp = np.empty(shape=(adata_sub.n_obs, k0))
nn_index_tmp[:] = np.nan
nn_index_tmp[idx_nonan] = diffusion_nn(adata_sub_sub, k=k0).astype('float')
#need to check neighbors (k0 or k0-1) as input?
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn = nn_index_tmp+1, #nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0 = k0,
type_ = type_
)
else:
#if there are too many too small connected components, set kBET score to 1
#(i.e. 100% rejection)
score = 1
else: #a single component to compute kBET on
#need to check neighbors (k0 or k0-1) as input?
nn_index_tmp = diffusion_nn(adata_sub, k=k0).astype('float')
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn = nn_index_tmp+1, #nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0 = k0,
type_ = type_
)
kBET_scores['cluster'].append(clus)
kBET_scores['kBET'].append(score)
kBET_scores = | pd.DataFrame.from_dict(kBET_scores) | pandas.DataFrame.from_dict |
import pandas as pd
from datetime import date, datetime
from functools import wraps
import importlib_resources
import requests
import time
import os
import io
token = "<KEY>"
#<PASSWORD>
#8a0ff681501b0bac557bf90fe6a036f7
def counter(func):
"""
    A decorator that counts how many times we executed a function.
    In our case we use it to track how many times we executed request()
    so that we do not exceed the API limit of 1000 requests/hour. When we approach
    the limit, the function automatically sleeps.
"""
@wraps(func)
def wrapper(*args, **kwargs):
wrapper.count += 1
if wrapper.count == 2:
            global start #we like to live dangerously ;) (it is necessary to have a global var here; otherwise it would not be recognized further below)
start = time.time()
if wrapper.count == 998:
end = time.time()
wait_time = end - start + 120
print("You aproached the limit of requests per hour. The download will automatically continue after " + str(int(wait_time)) + " seconds.")
time.sleep(wait_time)
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
wrapper.count = 1
return wrapper
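# Example (sketch): the wrapper exposes its call counter, which is what lets request() below
# throttle itself before the API limit is reached.
#
#   @counter
#   def ping():
#       return "ok"
#
#   ping(); ping()
#   ping.count   # -> 3 (the counter starts at 1 and is incremented before each call)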
@counter
def request(token, page = 1, items_per_page = 5000, start_date = "1.1.2020", end_date = "24.12.2021", pause = 0.1):
"""
    Request data from the API and return the response in json. The data in the API are bound to pages;
    one request obtains data for one page (5000 rows)
Parameters
----------
token : str
        input token for the API - can be obtained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
page : int
specifies page which will be downloaded (default 1)
items_per_page : int
        number of rows per page (default 5000)
start_date = str
        beginning date of the dataset - date in format "dd.mm.YYYY" (default is "1.1.2020")
end_date = str
        end date of the dataset - date in format "dd.mm.YYYY" (default is "24.12.2021")
pause : int
to not overload the API (default 0.1)
Raises
------
Exception
if response of API is not 200 or 429.
Returns
-------
r
response of the API, in json
"""
url = "https://onemocneni-aktualne.mzcr.cz/api/v3/osoby?page={a}&itemsPerPage={b}&datum%5Bbefore%5D={d}&datum%5Bafter%5D={c}".format(a = page, b = items_per_page, c = start_date, d = end_date)
r = requests.get(url, {"apiToken": token})
if r.status_code == 200:
None
elif r.status_code == 429: #API limit per request reached
msg = r.json()["message"] #shows message with info about when next request can be made
t = "".join(a for a in msg if a.isdigit())
print("Holy Moly! You exceeded the requests limit per hour, now you need to wait " + t + " seconds...")
time.sleep(int(t)+60)
request.count = 1
start = time.time()
r = request(token, page)
else: #In case of different errors
raise Exception("Status code: " + r.status_code, "Error message: " + r.text, "Stopped on page: " + str(page))
time.sleep(pause)
return r
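# Example usage (illustrative sketch; requires a valid API token):
#
#   r = request(token, page=3, items_per_page=5000,
#               start_date="1.3.2020", end_date="31.3.2020")
#   rows = r.json()["hydra:member"]   # list of case records on that page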
def get_total_pages(token, start_date = "1.1.2020", end_date = "24.12.2021"):
"""
    Identify how many pages need to be downloaded to download the whole dataset for the given
    start date and end date.
Parameters
----------
token : str
        input token for the API - can be obtained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
    start_date : str
        beginning date of the dataset - date in format "dd.mm.YYYY" (default is "1.1.2020")
end_date : str
end date of the dataset - datum in format "dd.mm.YYYY" (default is "24.12.2021")
Returns
-------
total_pages : int
total number of pages for given period
"""
r = request(token, start_date = start_date, end_date = end_date)
total_pages = int(r.json()["hydra:view"]["hydra:last"].split("=")[-1])
return total_pages
def get_total_items(token, start_date = "1.1.2020", end_date = "24.12.2021"):
"""
    Identify how many rows are in the dataset for the given start date and end date
Parameters
----------
token : str
        input token for the API - can be obtained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
    start_date = str
        beginning date of the dataset - date in format "dd.mm.YYYY" (default is "1.1.2020")
end_date = str
end date of the dataset - datum in format "dd.mm.YYYY" (default is "24.12.2021")
Returns
-------
total_items : int
total number of rows in dataset for given time period
"""
r = request(token, start_date = start_date, end_date = end_date)
total_items = int(r.json()['hydra:totalItems'])
return total_items
def get_vacination():
r = requests.get("https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/ockovani-pozitivni-hospitalizovani.csv-metadata.json")
url = "https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/" + r.json()["url"]
csv = requests.get("url")
csv = csv.content.decode('utf8')
csv_df = pd.read_csv(io.StringIO(csv))
csv_df.to_parquet('dataz.gzip',compression='gzip')
return csv_df
def duplicates_handling(df, i, P, pdf, total_len, start_date = "1.1.2020", end_date = "24.12.2021"):
"""Search for values that were not downloaded due to duplicates.
    The API provides data based on pages - each page can contain only a certain amount of rows (in our case 5000). But we are
    downloading a dataset with more than 2 mil. rows, hence we need to download about 500 pages and merge them together.
    Unfortunately, the data on each page are not exactly ordered and it may happen that the same value is on page 1 and page 2,
    for example. In other words, the observations are not entirely fixed to a specific row, thus when we request page 1 many times
    we do not get exactly the same results for each request. We solved it by identifying whether there were any duplicates and if yes,
    then we iterate multiple times in the neighbourhood of the page until we get the missed values.
Parameters
----------
df : dataframe
dataframe with covid data
i : int
a page where we curretly are
P : dic
dictionary that stores duplicates P = {page:duplicates}
start_date = str
begining date of the dataset - datum in format "dd.mm.YYYY" (default is "1.1.2020")
end_date = str
end date of the dataset - datum in format "dd.mm.YYYY" (default is "24.12.2021")
Returns
-------
df
returns the dataframe hopefully with more rows
"""
duplicates = pdf + 10000 - len(df) #if len(df), after download of two pages, did not increse by 10000 -> duplicates
#print("duplicates: ", duplicates)
if duplicates > 0:
print("Handling duplicates...")
m = 1 # defiend to prevent infinite while loop
while duplicates > 0: #should handle missing values due to duplicates
            if m == 8: #stop after 8 attempts; even if some duplicates are not found, the whole dataset may still end up complete (sometimes more duplicates are reported than actually exist)
                if total_len + 10000 - duplicates > i * 5000:
                    print("successful")
                    P[i] = duplicates
                else:
                    print("unsuccessful")
P[i] = duplicates
break
elif m % 2 == 0: #softer force, 5000 rows per page
for j in range(max(i - 2, 1), i + 1):
e = request(token, j, start_date = start_date, end_date = end_date)
df = df.merge(pd.DataFrame.from_dict(e.json()["hydra:member"]), how = "outer").drop_duplicates()
duplicates = pdf + 10000 - len(df)
#print("small", duplicates)
else: #harder force 10000 rows per page
for n in range(max(int(i/2) - 1, 1), int(i/2) + 1):
e = request(token, n, 10000, start_date = start_date, end_date = end_date)
df = df.merge(pd.DataFrame.from_dict(e.json()["hydra:member"]), how = "outer").drop_duplicates()
duplicates = pdf + 10000 - len(df)
#print("big", duplicates)
m += 1
if m < 5:
print("Solved!")
P[i] = duplicates
return df
def saving_interim_results(df, i):
"""
Saves partial downloads of the dataframe to your folder. The saving happens every 50 pages.
    It enables the code to run faster because once a part of the dataset is saved it is also dropped. The data are
    saved as parquet with snappy compression, because it is fast to load. So we maximally
    work with a df of length 280 000. And if your download is interrupted you do not need to start over again and
    can begin close to where you stopped.
Parameters
----------
df : dataframe
dataframe with covid data
i : int
a page on which the download is
Returns
-------
df
        last 30000 rows of your dataframe (the dataframe is not dropped entirely, because there might
        be duplicates between pages, so 30000 rows are kept)
"""
df.to_parquet('data{a}.parquet'.format(a = int(i/50)), compression = 'snappy')
df = pd.read_parquet('data{a}.parquet'.format(a = int(i/50)), engine = 'fastparquet').iloc[-30000:]
return df
def merging_interim_results(pages_total, initial_df = "1"):
"""
    Merges all the interim results created by function saving_interim_results(df, i) into the final data set, and attempts
    to delete the interim results from your folder. We save the final dataset
    with .gzip compression, which should be the best option for saving space with parquet.
Parameters
----------
pages_total : int
total number of pages for given period
    initial_df : str
        the first interim result
Returns
-------
data
the final downloaded dataset
"""
L = list(range(2, int(pages_total/50) + 2)) #list of numbers of saved interim datasets
    data = pd.read_parquet('data{a}.parquet'.format(a = initial_df), engine = 'fastparquet')
cwd = os.getcwd()
os.remove(cwd + "/data{a}.parquet".format(a = 1))
for j in L:
data = data.merge(pd.read_parquet('data{a}.parquet'.format(a = j), engine = 'fastparquet'), how = "outer")
try:
cwd = os.getcwd()
os.remove(cwd + "/data{a}.parquet".format(a = j)) #removes saved interim dataset
except:
None
return data
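# Example usage (illustrative sketch): once the download loop has written the interim
# 'data<N>.parquet' files via saving_interim_results(), the final dataset can be assembled with:
#
#   total_pages = get_total_pages(token)
#   data = merging_interim_results(total_pages)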
class Covid_Data:
"""A class used to manage covid data - storing, downloading and upadating
...
Attributes
----------
data : pandas data frame
data frame of the covid data
info : dic
dictionary of information regarding covid data in attribute data (total cases(rows), start_date, end_date)
    total_pages : int
        information regarding how many pages need to be requested from the API (loaded by calling method get_page(token))
    my_page : int
        states on what page your data set is, helpful when only a fraction of the data were downloaded
        (loaded by calling method get_page(token))
Methods
-------
get_info()
loads info about the covid data in attribute data
get_page(token, items_per_page = 5000)
obtain info about how many pages were downloaded out of total pages (API send the data in pages)
downloader(token, start_page = 1, start_date = "1.1.2020", end_date = "24.12.2021", upd = "N")
downloads covid data from API
updater(token, end_date = date.today())
updates covid data stored in attribute data
"""
def __init__(self):
"""
        Initializes an empty instance: attribute data is set to 0 until a dataset is
        loaded via load_data() or downloaded via method downloader().
"""
print("Class initialize, if you want to load data provided by this package - use method load_data() or you can download it on your own using method download(*args, *kwargs) You can access documentation at: "+str(importlib_resources.files("app"))+"/docs/_build/html/index.html")
self.data = 0
self.info = {"total cases": [],
"start_date": [],
"end_date": []}
if isinstance(self.data, int):
print("No data loaded or downloaded.")
else:
print("The provided data were loaded.")
self.get_info()
def load_data(self):
"""
loads data stored in package (from 1.3.2020 - 24.12.2021)
"""
my_resources = importlib_resources.files("app")
path = (str(my_resources) + "/data/datacovid.bz2")
self.data = pd.read_pickle(path, compression='bz2')
self.get_info()
print("Data loaded")
def get_info(self):
"""
loads info about the covid data in attribute data
if no data frame is loaded/downloaded yet it returns empty dictionary
"""
self.info["total cases"] = len(self.data)
self.data.datum = pd.to_datetime(self.data.datum)
self.data = self.data.sort_values(by = ["datum"])
self.info["start_date"] = str(self.data.iloc[1].datum.strftime("%d.%m.%Y"))
self.info["end_date"] = str(self.data.iloc[-1].datum.strftime("%d.%m.%Y"))
def get_page(self, token):
"""
        obtain info about how many pages were downloaded out of total pages (the API sends the data in pages)
Parameters
----------
token : str
            input token for the API - can be obtained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
"""
self.total_pages = get_total_pages(token, start_date = self.info["start_date"], end_date = self.info["end_date"])
self.my_page = int(len(self.data)/5000) + 1
self.to_update = get_total_pages(token, start_date = self.info["start_date"], end_date = date.today())
print("You downloaded " + str(self.my_page) + " pages out of total " + str(self.total_pages) + " pages. \nTo upadte your dataset to today date you need to get total: " + str(self.to_update)+ " pages")
def downloader(self, token, start_page = 1, start_date = "1.1.2020", end_date = "24.12.2021", upd = "N"):
"""
downloads covid data from API
Parameters
----------
token : str
            input token for the API - can be obtained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
start_page : int
            declare on which page you want to start the download - if you begin then 1; if you already downloaded some part you can resume,
            but the page where you stopped needs to be specified - it can be found out through method get_page() (default is 1)
start_date = str
            beginning of the covid data - date in format "dd.mm.YYYY" (default is "1.1.2020")
end_date = str
end of the covid data - datum in format "dd.mm.YYYY" (default is "24.12.2021")
upd = str
            only used by updater, irrelevant for you (default is "N")
"""
if start_page == 1: #if you begin your download
r_0 = request(token, start_page, start_date = start_date, end_date = end_date)
df = pd.DataFrame.from_dict(r_0.json()["hydra:member"])
total_len = len(df)
pdf = 0
pr_total_len = 0
else: #if you continue from specified page
start_page = int(start_page/50) * 50
df = | pd.read_parquet('data1.parquet', engine='fastparquet') | pandas.read_parquet |
# author <NAME>
import os
import pickle
from collections import defaultdict
import click
import cv2
import pandas as pd
from datetime import datetime
from glob import glob
def validate_files():
"""
    Check if the files created by the faces_train.py script exist
"""
recognizer_f = glob("./recognizers/*.yml")
pickle_f = glob("./pickles/*.pickle")
if not len(recognizer_f) or not len(pickle_f):
raise Exception("Missing files for recognizing people. Please create a dataset and run faces_train.py first.")
def validate_folder_structure():
"""
Check if folder structure is correct
"""
if not os.path.isdir("./cascades/data/") or \
not os.path.isdir("./recognizers") or \
not os.path.isdir("./pickles") or \
not os.path.isdir("./reports"):
raise Exception("Missing compulsory folder structure. Please do git checkout.")
def process_data(people_logger):
"""
    Creates a dictionary of DataFrames from the log and writes the results to a csv file.
:param people_logger: input data from face recognition
:return: dictionary of DataFrames
"""
# create dictionary DataFrames with name as a key and times
    ppl_logger_df = {key: pd.DataFrame(people_logger[key]) for key in people_logger}
# --------------
# import packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Load Offers
offers=pd.read_excel(path,sheet_name=0)
# Load Transactions
transactions=pd.read_excel(path,sheet_name=1)
transactions['n']=1
# Merge dataframes
df=pd.merge(offers,transactions)
# Look at the first 5 rows
print(df.head())
# --------------
# Code starts here
# create pivot table
matrix=pd.pivot_table(df,index='Customer Last Name',columns='Offer #',values='n')
# replace missing values with 0
matrix.fillna(0,inplace=True)
# reindex pivot table
matrix.reset_index(inplace=True)
# display first 5 rows
print(matrix.head())
# Code ends here
# --------------
# import packages
from sklearn.cluster import KMeans
# Code starts here
# initialize KMeans object
cluster=KMeans(n_clusters=5,init='k-means++',max_iter=300,n_init=10,random_state=0)
# create 'cluster' column
matrix['cluster']=cluster.fit_predict(matrix[matrix.columns[1:]])
print(matrix.head())
# Code ends here
# --------------
# import packages
from sklearn.decomposition import PCA
# Code starts here
# initialize pca object with 2 components
pca=PCA(n_components=2,random_state=0)
# create 'x' and 'y' columns donoting observation locations in decomposed form
matrix['x']=pca.fit_transform(matrix[matrix.columns[1:]])[:,0]
matrix['y']=pca.fit_transform(matrix[matrix.columns[1:]])[:,1]
print(matrix.head())
# dataframe to visualize clusters by customer names
clusters= | pd.DataFrame(data=matrix,columns=['Customer Last Name','cluster','x','y']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Generator reserve plots.
This module creates plots of reserve provision and shortage at the generation
and region level.
@author: <NAME>
"""
import logging
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import SetupSubplot, PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""reserves MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The reserves.py module contains methods that are
related to reserve provision and shortage.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def reserve_gen_timeseries(self, figure_name: str = None, prop: str = None,
start: float = None, end: float= None,
timezone: str = "", start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a generation timeseries stackplot of total cumulative reserve provision by tech type.
The code will create either a facet plot or a single plot depending on
if the Facet argument is active.
If a facet plot is created, each scenario is plotted on a separate facet,
otherwise all scenarios are plotted on a single plot.
To make a facet plot, ensure the word 'Facet' is found in the figure_name.
Generation order is determined by the ordered_gen_categories.csv.
Args:
figure_name (str, optional): User defined figure output name. Used here
to determine if a Facet plot should be created.
Defaults to None.
prop (str, optional): Special argument used to adjust specific
plot settings. Controlled through the plot_select.csv.
Options available are:
- Peak Demand
- Date Range
Defaults to None.
start (float, optional): Used in conjunction with the prop argument.
Will define the number of days to plot before a certain event in
a timeseries plot, e.g Peak Demand.
Defaults to None.
end (float, optional): Used in conjunction with the prop argument.
Will define the number of days to plot after a certain event in
a timeseries plot, e.g Peak Demand.
Defaults to None.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
# If not facet plot, only plot first scenario
facet=False
if 'Facet' in figure_name:
facet = True
if not facet:
Scenarios = [self.Scenarios[0]]
else:
Scenarios = self.Scenarios
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"reserves_generators_Provision",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for region in self.Zones:
self.logger.info(f"Zone = {region}")
ncols, nrows = self.set_facet_col_row_dimensions(facet,multi_scenario=Scenarios)
grid_size = ncols*nrows
excess_axs = grid_size - len(Scenarios)
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
data_tables = []
for n, scenario in enumerate(Scenarios):
self.logger.info(f"Scenario = {scenario}")
reserve_provision_timeseries = self["reserves_generators_Provision"].get(scenario)
#Check if zone has reserves, if not skips
try:
reserve_provision_timeseries = reserve_provision_timeseries.xs(region,level=self.AGG_BY)
except KeyError:
self.logger.info(f"No reserves deployed in: {scenario}")
continue
reserve_provision_timeseries = self.df_process_gen_inputs(reserve_provision_timeseries)
if reserve_provision_timeseries.empty is True:
self.logger.info(f"No reserves deployed in: {scenario}")
continue
# unitconversion based off peak generation hour, only checked once
if n == 0:
unitconversion = self.capacity_energy_unitconversion(reserve_provision_timeseries,
sum_values=True)
if prop == "Peak Demand":
self.logger.info("Plotting Peak Demand period")
total_reserve = reserve_provision_timeseries.sum(axis=1)/unitconversion['divisor']
peak_reserve_t = total_reserve.idxmax()
start_date = peak_reserve_t - dt.timedelta(days=start)
end_date = peak_reserve_t + dt.timedelta(days=end)
reserve_provision_timeseries = reserve_provision_timeseries[start_date : end_date]
Peak_Reserve = total_reserve[peak_reserve_t]
elif prop == 'Date Range':
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
reserve_provision_timeseries = reserve_provision_timeseries[start_date_range : end_date_range]
else:
self.logger.info("Plotting graph for entire timeperiod")
reserve_provision_timeseries = reserve_provision_timeseries/unitconversion['divisor']
scenario_names = pd.Series([scenario] * len(reserve_provision_timeseries),name = 'Scenario')
data_table = reserve_provision_timeseries.add_suffix(f" ({unitconversion['units']})")
data_table = data_table.set_index([scenario_names],append = True)
data_tables.append(data_table)
mplt.stackplot(reserve_provision_timeseries,
color_dict=self.PLEXOS_color_dict,
labels=reserve_provision_timeseries.columns,
sub_pos=n)
mplt.set_subplot_timeseries_format(sub_pos=n)
if prop == "Peak Demand":
axs[n].annotate('Peak Reserve: \n' + str(format(int(Peak_Reserve), '.2f')) + ' {}'.format(unitconversion['units']),
xy=(peak_reserve_t, Peak_Reserve),
xytext=((peak_reserve_t + dt.timedelta(days=0.25)), (Peak_Reserve + Peak_Reserve*0.05)),
fontsize=13, arrowprops=dict(facecolor='black', width=3, shrink=0.1))
if not data_tables:
self.logger.warning(f'No reserves in {region}')
out = MissingZoneData()
outputs[region] = out
continue
# Add facet labels
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
# Add legend
mplt.add_legend(reverse_legend=True, sort_by=self.ordered_gen)
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(region)
plt.ylabel(f"Reserve Provision ({unitconversion['units']})",
color='black', rotation='vertical', labelpad=40)
data_table_out = pd.concat(data_tables)
outputs[region] = {'fig': fig, 'data_table': data_table_out}
return outputs
def total_reserves_by_gen(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a generation stacked barplot of total reserve provision by generator tech type.
A separate bar is created for each scenario.
Args:
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"reserves_generators_Provision",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for region in self.Zones:
self.logger.info(f"Zone = {region}")
Total_Reserves_Out = pd.DataFrame()
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type and return the
already-validated window.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super()._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
Validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level.
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provide expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
length = len(axis) + (other is not None) * len(axis)
other = self.min_periods or -1
return max(length, other)
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.expanding.aggregate
DataFrame.rolling.aggregate
DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
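# Hedged sketch (assumption): the method under this decorator is cut off in this
# excerpt. In the upstream module it mirrors Rolling.aggregate above and simply
# defers to the base-class implementation; reconstructed from that pattern rather
# than from the original text.
def aggregate(self, arg, *args, **kwargs):
    return super().aggregate(arg, *args, **kwargs)

agg = aggregate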
# +
import os
import numpy as np
import pandas as pd
from tasrif.processing_pipeline import SequenceOperator
from tasrif.processing_pipeline.pandas import (
AsTypeOperator,
ConvertToDatetimeOperator,
JsonNormalizeOperator,
SetIndexOperator,
RenameOperator,
ResetIndexOperator,
DropFeaturesOperator,
)
from tasrif.processing_pipeline.custom import (
SimulateDayOperator,
AggregateOperator,
CreateFeatureOperator
)
from tasrif.processing_pipeline.tsfresh import TSFreshFeatureExtractorOperator
# -
# Load the data
dates = pd.date_range("2016-12-31", "2020-01-03", freq="15T").to_series()
df = pd.DataFrame()
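# Hedged sketch (assumption, not part of the original notebook): the frame above is
# created empty. A minimal way to exercise the pipeline operators imported earlier
# is to fill it with the simulated 15-minute timestamps plus a synthetic value
# column; the column names here are illustrative only.
df = pd.DataFrame({
    "dateTime": dates.values,
    "value": np.random.default_rng(0).integers(60, 120, size=len(dates)),
})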
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
# noqa: MD02
"""
Details about how Indexing Helper Class works.
_LocationIndexerBase provides the method framework for __getitem__
and __setitem__ that works with Modin DataFrame's internal index. The base
class's __{get,set}item__ takes in partitions & idx_in_partition data
and performs the lookup/item write.
_LocIndexer and _iLocIndexer are responsible for indexer-specific logic and
lookup computation. Loc also takes care of enlarging the DataFrame. Both
indexers translate pandas-style lookups to Modin DataFrame's internal
lookup.
An illustration is available at
https://github.com/ray-project/ray/pull/1955#issuecomment-386781826
"""
import numpy as np
import pandas
import itertools
from pandas.api.types import is_list_like, is_bool
from pandas.core.dtypes.common import is_integer, is_bool_dtype, is_integer_dtype
from pandas.core.indexing import IndexingError
from modin.error_message import ErrorMessage
from modin.logging import LoggerMetaClass, metaclass_resolver
from .dataframe import DataFrame
from .series import Series
from .utils import is_scalar
def is_slice(x):
"""
Check that argument is an instance of slice.
Parameters
----------
x : object
Object to check.
Returns
-------
bool
True if argument is a slice, False otherwise.
"""
return isinstance(x, slice)
def compute_sliced_len(slc, sequence_len):
"""
Compute length of sliced object.
Parameters
----------
slc : slice
Slice object.
sequence_len : int
Length of sequence, to which slice will be applied.
Returns
-------
int
Length of object after applying slice object on it.
"""
# This will translate slice to a range, from which we can retrieve length
return len(range(*slc.indices(sequence_len)))
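# Illustrative usage of compute_sliced_len (not part of the original module):
# slice.indices() clamps the bounds to the sequence length before the range
# length is taken, so out-of-range stops are handled for free.
assert compute_sliced_len(slice(1, 10, 2), 6) == 3   # elements 1, 3, 5
assert compute_sliced_len(slice(None), 4) == 4       # a full slice keeps every row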
def is_2d(x):
"""
Check that argument is a list or a slice.
Parameters
----------
x : object
Object to check.
Returns
-------
bool
`True` if argument is a list or slice, `False` otherwise.
"""
return is_list_like(x) or is_slice(x)
def is_tuple(x):
"""
Check that argument is a tuple.
Parameters
----------
x : object
Object to check.
Returns
-------
bool
True if argument is a tuple, False otherwise.
"""
return isinstance(x, tuple)
def is_boolean_array(x):
"""
Check that argument is an array of bool.
Parameters
----------
x : object
Object to check.
Returns
-------
bool
True if argument is an array of bool, False otherwise.
"""
if isinstance(x, (np.ndarray, Series, pandas.Series, pandas.Index)):
return is_bool_dtype(x.dtype)
elif isinstance(x, (DataFrame, pandas.DataFrame)):
return all(map(is_bool_dtype, x.dtypes))
return is_list_like(x) and all(map(is_bool, x))
def is_integer_array(x):
"""
Check that argument is an array of integers.
Parameters
----------
x : object
Object to check.
Returns
-------
bool
True if argument is an array of integers, False otherwise.
"""
if isinstance(x, (np.ndarray, Series, pandas.Series, pandas.Index)):
return | is_integer_dtype(x.dtype) | pandas.core.dtypes.common.is_integer_dtype |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
# TODO(wesm): Pandas only supports ns resolution; Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
# all occurrences of _check_pandas_roundtrip pass expected_schema
# without the pandas generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
# we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop in a null and use ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_array_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
assert arr.equals(pa.array(expected))
result = arr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(arr.to_pandas(), expected)
result = arr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_chunked_array_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
carr = pa.chunked_array([data])
result = carr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(carr.to_pandas(), expected)
result = carr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_column_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
column = pa.column('date', arr)
result = column.to_pandas()
npt.assert_array_equal(column.to_pandas(), expected)
result = column.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_table_convert_date_as_object(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
df_datetime = table.to_pandas()
df_object = table.to_pandas(date_as_object=True)
tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
check_dtype=True)
tm.assert_frame_equal(df, df_object, check_dtype=True)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
# TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
def test_pandas_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match="was not a utf8 string"):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(
pa.lib.ArrowInvalid,
match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = | pd.DataFrame({'decimals': values}) | pandas.DataFrame |
# Author: https://github.com/Gugu7264
import os
import gspread_asyncio as gaio
import hjson
import pandas as pd
from discord import Intents
from discord.ext import commands
from dotenv import load_dotenv
from oauth2client.service_account import ServiceAccountCredentials
import utilities
load_dotenv("../dev.env")
client = commands.Bot(command_prefix=os.getenv("prefix"), intents=Intents.all())
with open("../config.hjson") as f:
config = hjson.load(f)
def get_creds():
return ServiceAccountCredentials.from_json_keyfile_name(
"../creds.json",
["https://spreadsheets.google.com/feeds",
'https://www.googleapis.com/auth/spreadsheets',
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive"],
)
async def get_sheet(google_client):
session = await google_client.authorize()
sheet1 = (await session.open_by_url(
"https://docs.google.com/spreadsheets/d/1xvmK6yawHbhtfvi_0Dvp9afWv8mZ5Tn7o_szE1a76ZY/edit")).sheet1
sheet2 = (await session.open_by_url(
"https://docs.google.com/spreadsheets/d/1hsw5l0IXoPK9k9CWXZrW556yAUsUdjtSs9joub4Oa_g/edit")).sheet1
return sheet1, sheet2
def pair_data(data, cols, *row_name):
print("data", len(data))
final = []
c = 0
temp = []
for d in data:
if c == cols:
final.append(temp)
temp = []
c = 0
if d.value == "":
break
temp.append(d.value)
c += 1
if row_name:
c = 1
for name in row_name:
final[0][c] = name
c += 1
return final
async def main():
google_client = gaio.AsyncioGspreadClientManager(get_creds)
sheets = await get_sheet(google_client)
sheet = sheets[0]
sheet2 = sheets[1]
names = utilities.get_rank_categories(flatten=True)
print(names)
all_time = pair_data(sheet.range("J2:K" + str(sheet.row_count)), 2, "all_time")
df_all_time = pd.DataFrame(all_time[1:], columns=all_time[0])
monthly = pair_data(sheet.range("C2:D" + str(sheet.row_count)), 2, "monthly")
df_monthly = pd.DataFrame(monthly[1:], columns=monthly[0])
weekly = pair_data(sheet.range("Q2:R" + str(sheet.row_count)), 2, "weekly")
df_weekly = pd.DataFrame(weekly[1:], columns=weekly[0])
daily = pair_data(sheet.range("X2:Y" + str(sheet.row_count)), 2, "daily")
df_daily = | pd.DataFrame(daily[1:], columns=daily[0]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input,Output
import os
print(os.getcwd())
df_input_large=pd.read_csv('C:/Users/Asus/ads_covid-19/data/processed/COVID_large_flat_table.csv',sep=';',parse_dates=[0])
df_input_large=df_input_large.sort_values('date',ascending=True)
df_input_SIR= | pd.read_csv('C:/Users/Asus/ads_covid-19/data/processed/COVID_large_fitted_table.csv',sep=';') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
This file contains functions to fetch data from the Domestic Load Research SQL Server database. It must be run from a server with a DLR database installation.
The following functions are defined:
getObs
getProfileID
getMetaProfiles
profileFetchEst
getProfiles
getSampleProfiles
profilePeriod
getGroups
getLocation
saveTables
saveAllProfiles
anonAns
SOME EXAMPLES
# Using getObs with SQL queries:
query = 'SELECT * FROM [General_LR4].[dbo].[linktable] WHERE ProfileID = 12005320'
df = getObs(db_cnx, querystring = query)
"""
import pandas as pd
import numpy as np
import pyodbc
import feather
import os
import shapefile as shp
from shapely.geometry import Point
from shapely.geometry import shape
from support import rawprofiles_dir, table_dir, obs_dir, writeLog, validYears, data_dir
def getObs(db_cnx, tablename = None, querystring = 'SELECT * FROM tablename', chunksize = 10000):
"""
Fetches a specified table from the DLR database and returns it as a pandas dataframe.
"""
#connection object:
try:
with open(os.path.join(obs_dir, db_cnx), 'r') as f:
cnxnstr = f.read().replace('\n', '')
except FileNotFoundError as err:
print("File not found error: {0}".format(err))
raise
else:
try:
cnxn = pyodbc.connect(cnxnstr)
#specify and execute query(ies):
if querystring == "SELECT * FROM tablename":
if tablename is None:
return print('Specify a valid table from the DLR database')
elif tablename == 'Profiletable':
return print('The profiles table is too large to read into python in one go. Use the getProfiles() function.')
else:
query = "SELECT * FROM [General_LR4].[dbo].%s" % (tablename)
else:
query = querystring
df = pd.read_sql(query, cnxn) #read to dataframe
return df
except Exception:
raise
def geoMeta():
"""
This function generates geographic metadata for groups by combining GroupID Lat/Long coordinates with the municipal boundaries dataset.
"""
# Download the SHP, DBF and SHX files from http://energydata.uct.ac.za/dataset/2016-municipal-boundaries-south-africa
munic2016 = os.path.join(data_dir, 'obs_datasets', 'geo_meta', '2016-Boundaries-Local','Local_Municipalities_2016')
site_ref = pd.read_csv(os.path.join(data_dir, 'obs_datasets', 'geo_meta', 'DLR Site coordinates.csv'))
#response = requests.get(ckanurl)
sf = shp.Reader(munic2016)
all_shapes = sf.shapes() # get all the polygons
all_records = sf.records()
g = list()
for i in range(0, len(site_ref)):
for j in range(0, len(all_shapes)):
boundary = all_shapes[j]
if Point(tuple([site_ref.loc[i,'Long'],site_ref.loc[i,'Lat']])).within(shape(boundary)):
g.append([all_records[j][k] for k in (1, 5, 9)])
geo_meta = | pd.DataFrame(g, columns = ['Province','Municipality','District']) | pandas.DataFrame |
# Import libraries | Standard
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
import os
import datetime
import warnings
warnings.filterwarnings("ignore") # ignoring annoying warnings
from time import time
from rich.progress import track
# Import libraries | Visualization
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Import libraries | Sk-learn
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error
from sklearn.metrics.scorer import make_scorer
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
import xgboost as xgb
from lightgbm import LGBMRegressor
# udf function
from util_func import distribution
def read_data(file):
features = pd.read_csv('../raw_data/'+ file[0])
train = pd.read_csv('../raw_data/'+ file[1])
stores = pd.read_csv('../raw_data/'+ file[2])
test = pd.read_csv('../raw_data/'+ file[3])
return features,train,stores,test
filename = ["features.csv","train.csv","stores.csv","test.csv"]
features, train, stores, test = read_data(filename)
#################################################################################### Data preprocessing
#################################################################################### (1) Missing value and outlier handling
#################################################################################### stores
# Abnormal store records (with indices)
print(stores[stores['Store'].isin([3,5,33,36])].index)
# index [2,4,32,35] type = 'C'
stores.iloc[2,1] = stores.iloc[4,1] = stores.iloc[32,1] = stores.iloc[35,1] = 'C'
#################################################################################### features
# Features Data | Negative values for MarkDowns
features['MarkDown1'] = features['MarkDown1'].apply(lambda x: 0 if x < 0 else x)
features['MarkDown2'] = features['MarkDown2'].apply(lambda x: 0 if x < 0 else x)
features['MarkDown3'] = features['MarkDown3'].apply(lambda x: 0 if x < 0 else x)
features['MarkDown4'] = features['MarkDown4'].apply(lambda x: 0 if x < 0 else x)
features['MarkDown5'] = features['MarkDown5'].apply(lambda x: 0 if x < 0 else x)
# Features Data | NaN values for multiple columns
for i in track(range(len(features))):
if features.iloc[i]['Date'] == '2013-04-26':
CPI_new = features.iloc[i]['CPI']
Unemployment_new = features.iloc[i]['Unemployment']
if np.isnan(features.iloc[i]['CPI']):
features.iat[i, 9] = CPI_new
features.iat[i, 10] = Unemployment_new
# Columns: MarkDown1, MarkDown2, MarkDown3, MarkDown4 & MarkDown5
features['Week'] = 0
for i in track(range(len(features))):
features.iat[i, 12] = datetime.date(
int(features.iloc[i]['Date'][0:4]),
int(features.iloc[i]['Date'][5:7]),
int(features.iloc[i]['Date'][8:10])
).isocalendar()[1]
# missing data for 2012 & 2013
features['Year'] = features['Date'].str.slice(start=0, stop=4)
total = features[features['Year'].isin(['2012','2013'])].isnull().sum().sort_values(ascending=False)
percent = (features[features['Year'].isin(['2012','2013'])].isnull().sum()/
features[features['Year'].isin(['2012','2013'])].isnull().count()
).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
print(missing_data.head())
# Iterate through stores
for i in track(range(1, len(features['Store'].unique()))):
# For 2010, iterate through weeks 5 thru 52
for j in range(5, 52):
idx = features.loc[(features.Year == '2010') & (features.Store == i) & (features.Week == j),['Date']].index[0]
features.iat[idx, 4] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown1']].values[0]
features.iat[idx, 5] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown2']].values[0]
features.iat[idx, 6] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown3']].values[0]
features.iat[idx, 7] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown4']].values[0]
features.iat[idx, 8] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown5']].values[0]
# For 2011, iterate through weeks 1 thru 44
for j in range(1, 44):
idx = features.loc[(features.Year == '2011') & (features.Store == i) & (features.Week == j),['Date']].index[0]
features.iat[idx, 4] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown1']].values[0]
features.iat[idx, 5] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown2']].values[0]
features.iat[idx, 6] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown3']].values[0]
features.iat[idx, 7] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown4']].values[0]
features.iat[idx, 8] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown5']].values[0]
features.drop(columns=['Year'], axis=1, inplace=True)
features.fillna(0, inplace=True)
#################################################################################### train
# Train Data | Negative Values for Weekly Sales
train['Weekly_Sales'] = train['Weekly_Sales'].apply(lambda x: 0 if x < 0 else x)
#################################################################################### (2) Merge datasets
# Merge the following datasets:
# Stores + Features + Train
# Stores + Features + Test
# Remove duplicate columns from each dataset
train = pd.merge(train, stores, how='left', on=['Store'])
train = | pd.merge(train, features, how='left', on=['Store','Date']) | pandas.merge |
# |------------------------------------------------------------------
# | # Flu Prediction - Time Series Analysis TS2
# |------------------------------------------------------------------
# |
# | ## 1. Introduction
# |
# | This is a notebook to practice the routine procedures
# | commonly used in time series analysis.
# | This notebook is based on the Kaggle [Time Series Analysis](https://www.kaggle.com/learn/time-series)
# | offered by <NAME>.
# | A time series consists of __trend__, __seasonality__, __cycles__,
# | and __peculiarity__. For each feature, we have a procedure to deal with it
# | (a short sketch of the trend + seasonality handling follows the list):
# |
# | - For __trend__ : Analytical fitting of the baselines (linear, polynomial, etc)
# | - For __seasonality__ : Fourier decomposition.
# | - For __cycle__ : Lags.
# | - For __peculiarity__ : Categorical features.
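# |
# | As an aside, below is a minimal sketch of the trend + seasonality handling
# | listed above (a linear trend plus an annual Fourier seasonality built with
# | statsmodels' `DeterministicProcess`). The weekly index, the start date, and
# | the Fourier order are illustrative assumptions, not values taken from the
# | flu data; the imports are repeated so the aside is self-contained.
import pandas as _pd_aside
from statsmodels.tsa.deterministic import CalendarFourier as _CalendarFourier
from statsmodels.tsa.deterministic import DeterministicProcess as _DeterministicProcess

_idx = _pd_aside.date_range("2009-06-29", periods=156, freq="W-MON")  # ~3 years, weekly
_fourier = _CalendarFourier(freq="A", order=3)  # annual seasonality ("YE" on newer pandas)
_dp = _DeterministicProcess(index=_idx,
                            constant=True,               # intercept
                            order=1,                     # linear trend
                            additional_terms=[_fourier],
                            drop=True)
_X_aside = _dp.in_sample()  # trend + Fourier features, ready for a linear model
# | Such a design matrix is typically fed to LinearRegression(fit_intercept=False).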
# |
# | In this notebook we will learn how to use lagged features to make predictions.
# | We also learn some terminology.
# | - __Forecast horizon__ : the part of the time sequence over which we make forecasts.
# | - __Forecast origin__ : the point of the time sequence where the training data ends.
# | - __Lead time__ : the part of the time sequence after the forecast origin, but before
# | the forecast horizon starts.
# | When we have a forecast horizon longer than one time unit, the prediction requires a
# | __multioutput model__.
# | We have a couple of strategies for producing the multiple outputs
# | (a minimal sklearn sketch follows the list).
# | - __Direct strategy__ : Create one model for each day in the horizon,
# | and perform the prediction directly.
# | One needs as many models as there are forecast points in the forecast horizon.
# | - __Recursive strategy__ : First, train a model to predict the first
# | day in the horizon. Only the given training data is used for the training.
# | Use that same model to predict the second day in the horizon, but
# | now we have one new input from the day before (=the forecast on the first
# | day in the horizon).
# | - __DirRec strategy__ : Combination of the above two. Create a
# | model to forecast on the first day in the horizon. Use that new information
# | as a ground truth, and create the second model to forecast on the second day.
# | One needs as many models as there are forecast points in the forecast horizon.
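# |
# | Below is a minimal sklearn sketch of the direct and DirRec strategies
# | (the 2-lag design, the 4-step horizon, and the use of LinearRegression are
# | illustrative assumptions, not part of this notebook; imports are repeated so
# | the sketch is self-contained). The recursive strategy has no ready-made
# | sklearn wrapper: a single one-step model is applied repeatedly, feeding its
# | own forecasts back in as lag inputs.
import numpy as _np_mo
import pandas as _pd_mo
from sklearn.linear_model import LinearRegression as _LR
from sklearn.multioutput import MultiOutputRegressor, RegressorChain

_y = _pd_mo.Series(_np_mo.arange(60, dtype="float64"))  # stand-in weekly series
_X = _pd_mo.concat({f"y_lag_{i}": _y.shift(i) for i in (1, 2)}, axis=1)
_Y = _pd_mo.concat({f"y_step_{j}": _y.shift(-j) for j in range(4)}, axis=1)  # 4-step horizon
_ok = _X.notna().all(axis=1) & _Y.notna().all(axis=1)
_direct = MultiOutputRegressor(_LR()).fit(_X[_ok], _Y[_ok])  # direct: one model per step
_dirrec = RegressorChain(_LR()).fit(_X[_ok], _Y[_ok])        # DirRec: later steps see earlier predictions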
# | ## 2. Task
# | From the historical record of visits to the doctor's office,
# | we will forecast the number of such visits in the future.
# | ## 3. Data
# | 1. The historical record of weekly visits to the doctor's office,
# | starting in 2009 and ending in 2016.
# |
# | 2. The data above comes with Google search records related to
# | flu. The keywords and the number of searches are tabulated for
# | each week.
# | ## 4. Notebook
# -------------------------------------------------------
# | Import packages.
from pathlib import Path
import os
import pandas as pd
import numpy as np
from scipy.signal import periodogram
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.multioutput import MultiOutputRegressor, RegressorChain
from statsmodels.tsa.deterministic import CalendarFourier, DeterministicProcess
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import pacf
import statsmodels.api as sm
from xgboost import XGBRegressor
from IPython.display import display
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import kaleido
from kaggle_tsa.ktsa import *
# -------------------------------------------------------
# | Set up directories.
CWD = Path('/Users/meg/git7/flu/')
DATA_DIR = Path('../input/ts-course-data/')
KAGGLE_DIR = Path('ryanholbrook/ts-course-data/')
IMAGE_DIR = Path('./images')
HTML_DIR = Path('./html')
os.chdir(CWD)
set_cwd(CWD)
# -------------------------------------------------------
# | If the data is not downloaded yet, do so now.
set_data_dir(KAGGLE_DIR, CWD)
show_whole_dataframe(True)
# -------------------------------------------------------
# | Read the data, first as it is.
flu = pd.read_csv(DATA_DIR/'flu-trends.csv')
print(flu.info())
display(flu.head(3))
# -------------------------------------------------------
# | First, let us only deal with `FluVisits`.
# | We would also like to parse `Week`. We tried `parse_dates=["Week"]`
# | and `infer_datetime_format=True`, but neither worked out.
# | There might be a simpler way, but this is all I can
# | think of.
flu = pd.read_csv(DATA_DIR/'flu-trends.csv',
dtype={'FluVisits': 'float64'},
usecols=['Week', 'FluVisits'])
flu = split_week(flu, append=False)
is_index_continuous(flu, freq='W-MON')
# -------------------------------------------------------
# | Write a function to check the continuity of the index.
# | Raise a flag if there are missing or skipped dates in the sequence.
# | => Done.
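# | A minimal sketch of such a check (an assumption, not the actual
# | `kaggle_tsa.ktsa` implementation used above):
def is_index_continuous_sketch(df, freq='W-MON'):
    """Return True if df.index covers a complete date_range at `freq`."""
    full = pd.date_range(df.index.min(), df.index.max(), freq=freq)
    return df.index.equals(full)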
# -------------------------------------------------------
# | Let us take a look at data again.
trace = go.Scatter(x=flu.index,
y=flu['FluVisits'])
data = [trace]
layout = go.Layout(height=512,
font=dict(size=16),
showlegend=False)
fig = go.Figure(data=data, layout=layout)
fig_wrap(fig, IMAGE_DIR/'fig1.png')
# -------------------------------------------------------
# | Create lag plots.
y = flu['FluVisits'].copy()
n_lag = 12
fig, corr = create_lag_plot(y, n_lag=n_lag, n_cols=3)
fig_wrap(fig, IMAGE_DIR/'fig2.png')
_ = [print(f'Lag {i:-2}: {y.autocorr(lag=i):5.3f}')
for i in range(1, n_lag+1)]
# -------------------------------------------------------
# | Create PACF (Partial Autocorrelation Function) plot.
fig = create_pacf_plot(y, n_lag=n_lag)
fig_wrap(fig, IMAGE_DIR/'fig3.png')
# -------------------------------------------------------
# | Lag 1, 2, 3, and 4 have significant correlation
# | with the target.
# -------------------------------------------------------
# | Construct the time dummy.
# | __Note this trick!__ One can use a dictionary `{}`
# | in `pd.concat` to specify the column names and the values
# | at the same time.
y = flu['FluVisits'].copy()
n_lag = 4
X = pd.concat({f'y_lag_{i}': y.shift(i) for i in range(1, n_lag+1)},
axis=1).fillna(0.0)
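# | (The first `n_lag` rows have no lagged history, so `shift` leaves NaNs
# | there; `fillna(0.0)` keeps those rows instead of dropping them.)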
# -------------------------------------------------------
# | Start the machine learning part.
# | Note __`shuffle=False`__, so that the test data comes
# | after the training data in the time sequence.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=False)
model = LinearRegression()
model.fit(X_train, y_train)
y_fit = model.predict(X_train)
y_pred = model.predict(X_test)
# -------------------------------------------------------
# | Let us see the results.
fig = show_training_results(X, y, X_train, y_fit, X_test, y_pred,
titles=('[Year]', '[Cases]',
'Flu-visit predictions in 2015-2016 (Lags only)'))
fig_wrap(fig, IMAGE_DIR/'fig4.png')
# -------------------------------------------------------
# | Error evaluation.
train_rmse = mean_squared_error(y_train, y_fit, squared=False)
print(f'Train RMSE : \033[96m{train_rmse:6.2f}\033[0m')
test_rmse = mean_squared_error(y_test, y_pred, squared=False)
print(f'Test RMSE : \033[96m{test_rmse:6.2f}\033[0m')
# -------------------------------------------------------
# | The test error being smaller than the training error suggests
# | the model is still underfitting. There is room to improve the
# | forecast.
# |
# | We will see if we can make the error smaller if we include
# | the Google search keywords. First take a look at the data.
# |
search = pd.read_csv(DATA_DIR/'flu-trends.csv')
# | Which words are most often Google-searched?
search.mean(axis=0).sort_values()
# -------------------------------------------------------
# | Let us take the keywords that contain the string 'flu'.
s_cols_week = search.columns[search.columns.str.contains('flu|Week')]
s_cols = search.columns[search.columns.str.contains('flu')]
search = pd.read_csv(DATA_DIR/'flu-trends.csv',
dtype={s: 'float64' for s in s_cols},
usecols=s_cols_week)
search = split_week(search, append=False)
is_index_continuous(search, freq='W-MON')
# -------------------------------------------------------
# | Create lagged time features.
y = flu['FluVisits'].copy()
n_lag = 4
X_lag = pd.concat({f'y_lag_{i}': y.shift(i) for i in range(1, n_lag+1)},
axis=1).fillna(0.0)
X_lag.isna().sum().sum()
# -------------------------------------------------------
# | Create lagged search-words features.
y_search = search[s_cols]
y_search.isna().sum()
X_search = pd.concat({f'y_lag_{i}': y_search.shift(i)
for i in range(1, n_lag+1)}, axis=1).fillna(0.0)
X_search.isna().sum().sum()
X = pd.concat([X_lag, X_search], axis=1)
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_rolling_apply_consistency_sum_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_rolling_apply_consistency_sum_no_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = Series(np.zeros(20))
other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
],
)
def test_rolling_functions_window_non_shrinkage_binary(f):
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr():
# #18804 all rolling skew for all equal values should return Nan
    a = Series([1.1] * 15)
#
# Copyright (C) 2022 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
class DatetimeImputer(TransformerMixin, BaseEstimator):
"""Imputer for date and timestamp data."""
def __init__(self, strategy='mean', fill_value=None):
"""Create a `DatetimeImputer`.
Parameters
----------
strategy: imputation strategy, one of 'mean', 'median', 'most_frequent' or 'constant'
fill_value: the value used when `strategy` is 'constant'
"""
if strategy not in ('mean', 'median', 'most_frequent', 'constant'):
raise ValueError(f'Unknown strategy: {strategy}')
if strategy == 'constant' and not fill_value:
raise ValueError('A `fill_value` need to be provided for `constant` strategy.')
self.strategy = strategy
self.fill_value = fill_value
def fit(self, X, y=None):
"""Find necessary values (e.g. mean, median, or most_frequent) of the input data.
Parameters
----------
X: a pandas DataFrame whose values are date or timestamp.
y: not used
"""
self.fill_values = {}
for col_name, col_value in X.iteritems():
col_value = pd.to_datetime(col_value, errors="coerce")
if self.strategy == 'mean':
self.fill_values[col_name] = col_value.mean(skipna=True)
elif self.strategy == 'median':
self.fill_values[col_name] = col_value.median(skipna=True)
elif self.strategy == 'most_frequent':
self.fill_values[col_name] = col_value.mode(dropna=True)[0]
elif self.strategy == 'constant':
                self.fill_values[col_name] = pd.to_datetime(self.fill_value)
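        # Assumed conventional ending: scikit-learn style fit methods return self.
        return self

    def transform(self, X):
        """Replace missing date/timestamp values with the values computed in `fit`.

        A minimal sketch assuming `self.fill_values` was populated by `fit`.
        """
        X = X.copy()
        for col_name in X.columns:
            X[col_name] = pd.to_datetime(X[col_name], errors="coerce")
            if col_name in self.fill_values:
                X[col_name] = X[col_name].fillna(self.fill_values[col_name])
        return X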
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
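# Illustration only: the first tuple is the header, e.g.
# create_dataframe((("a", "b"), (1, 2), (3, 4))) gives a frame with columns a, b.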
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
### WEIGHTS FIXTURES ------------------------------------------------------
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_jan():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-12-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-02-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-03-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-04-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-05-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-06-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-07-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-08-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-09-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-10-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-11-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-12-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_feb():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
            (Timestamp('2013-04-01 00:00:00')
# RCS14_entrainment_naive.py
# Generate timeseries analysis and power estimate
# Author: maria.olaru@
"""
Created on Mon May 3 18:22:44 2021
@author: mariaolaru
"""
import numpy as np
import pandas as pd
import glob
from datetime import datetime
import os
import re
def get_filepaths(dir_name):
nchars = len(dir_name)
if (dir_name[nchars-1] != '/'):
dir_name = dir_name + '/'
filepaths = glob.glob(dir_name + "Session*/" + "Device*")
return filepaths
def find_file(file_name, parent_dir):
#STEP 1: Get all files in all subdirectories of parent_dir
array_all = np.array([])
for root, subdirectories, files in os.walk(parent_dir):
if file_name in files:
file_match = os.path.join(root, file_name)
array_all = np.append(array_all, file_match)
return array_all
def preprocess_tdd(tdd, p):
"""
Parameters
----------
    tdd : JSON to CSV converted time domain data.
Returns
-------
df_preproc : Restructured and reformatted tdd.
"""
tdd_rename = tdd.rename(columns={"DerivedTime": "timestamp", "key0": "ch0_mv", "key1": "ch1_mv", "key2": "ch2_mv", "key3": "ch3_mv"})
tdd_preproc = tdd_rename.drop(["localTime"], axis = 1)
tdd_preproc = tdd_preproc.drop(["samplerate"], axis = 1)
# sesh_id = os.path.basename(os.path.abspath(os.path.join(p, "../")))
# sesh_id = re.findall(r'\d+', sesh_id)[0]
# tdd_preproc.insert(0, 'session_id', sesh_id)
# tdd_preproc[['session_id']] = tdd_preproc[['session_id']].astype(int).astype(str)
tdd_preproc[['timestamp']] = tdd_preproc[['timestamp']].astype(int)
return tdd_preproc
def preprocess_sls(df):
"""
Parameters
----------
df : JSON to CSV converted stim log settings data.
Returns
-------
df_expanded : Restructured and reformatted sls data.
"""
df = df.rename(columns={"HostUnixTime": "timestamp_unix", "therapyStatus": "stim_status"})
df_expanded_params = df["stimParams_prog1"].str.split(pat = ",", expand = True)
df_expanded_params = df_expanded_params.rename(columns={0: "stim_contacts", 1: "stim_amp", 2: "stim_pw", 3: "stim_freq"})
df_expanded_params["stim_amp"] = df_expanded_params["stim_amp"].str.replace('mA', '')
df_expanded_params["stim_amp"] = df_expanded_params["stim_amp"].astype(float)
df_expanded_params["stim_pw"] = df_expanded_params["stim_pw"].str.replace('us', '')
df_expanded_params["stim_pw"] = df_expanded_params["stim_pw"].astype(int)
df_expanded_params["stim_freq"] = df_expanded_params["stim_freq"].str.replace('Hz', '')
df_expanded_params["stim_freq"] = df_expanded_params["stim_freq"].astype(float)
df_expanded_contact = df_expanded_params["stim_contacts"].str.split(pat = "+", expand = True)
df_expanded_contact = df_expanded_contact.rename(columns={0: "stim_contact_an", 1: "stim_contact"})
df_expanded_contact["stim_contact"] = df_expanded_contact["stim_contact"].str.replace('-', '')
df_expanded_contact["stim_contact"] = df_expanded_contact["stim_contact"].astype(int)
df_expanded_params = df_expanded_params.drop(["stim_contacts"], axis = 1)
df_expanded = pd.concat([df.loc[:, ["timestamp_unix", "stim_status"]], df_expanded_contact, df_expanded_params], axis=1)
indx = np.array(df_expanded[(df_expanded['stim_status'] == 1) & (df_expanded['stim_amp'] == 0)].index)
#include low stimulation amplitudes in field
if indx.size != 0:
df_expanded.loc[indx, 'stim_amp'] = 0.001
#change amplitude to reflect stimulation status
indx = np.array(df_expanded[(df_expanded['stim_status'] == 0) & (df_expanded['stim_amp'] != 0)].index)
if indx.size != 0:
df_expanded.loc[indx, 'stim_amp'] = 0
return df_expanded
def preprocess_tds(df):
"""
Parameters
----------
df : JSON to CSV converted time domain settings data.
Returns
-------
df_expanded : Restructured and reformatted tds data.
"""
    # NEED TO DECIDE WHICH TIMESTAMP TO KEEP: TIMESTOP OR TIMESTART
df = df.rename(columns={"timeStart": "timestamp_unix"}) #time start of settings
df = df.rename(columns={"timeStop": "timestamp_unix_stop"}) #time stop of settings
df_ch1 = expand_sense_params(df["chan1"], "ch0")
df_ch2 = expand_sense_params(df["chan2"], "ch1")
df_ch3 = expand_sense_params(df["chan3"], "ch2")
df_ch4 = expand_sense_params(df["chan4"], "ch3")
df_expanded = pd.concat([df["timestamp_unix"], df["timestamp_unix_stop"], df_ch1, df_ch2, df_ch3, df_ch4], axis = 1)
df_expanded = df_expanded.drop(['ch1_sr', 'ch2_sr', 'ch3_sr'], axis = 1)
df_expanded = df_expanded.rename(columns={'ch0_sr': 'sr'})
return df_expanded
def expand_sense_params(df, label):
"""
Parameters
----------
df : data from a single tds channel.
label : label of tds channel from df.
Returns
-------
df_expanded : expanded columns for each input datatype
"""
df_expanded_params = df.str.split(pat = " ", expand = True)
df_expanded_params = df_expanded_params.rename(columns={0: (label+"_sense_contacts"), 1: (label+"_lfp1"), 2: (label+"_lfp2"), 3: (label+"_sr")})
df_expanded_params[(label+"_lpf1")] = df_expanded_params[(label+"_lfp1")].str.replace('LFP1-', '')
# df_expanded_params[(label+"_lfp1")] = df_expanded_params[(label+"_lfp1")].astype(int)
df_expanded_params[(label+"_lpf2")] = df_expanded_params[(label+"_lfp2")].str.replace('LFP2-', '')
# df_expanded_params[(label+"_lfp2")] = df_expanded_params[(label+"_lfp2")].astype(int)
df_expanded_params[(label+"_lpfs")] = df_expanded_params[label+"_lpf1"] + '-' + df_expanded_params[label+"_lpf2"]
df_expanded_params[(label+"_sr")] = df_expanded_params[(label+"_sr")].str.replace('SR-', '')
df_expanded_params = df_expanded_params.drop([label + '_lfp1', label + '_lfp2', label + '_lpf1', label + '_lpf2'], axis = 1)
#Need to edit this later
if ((df_expanded_params[(label+"_sr")] == 'Disabled').any()):
indx_vals = df_expanded_params[df_expanded_params[(label+"_sr")]=='Disabled'].index
df_expanded_params[(label+"_sr")][indx_vals] = 0
print("Warning: hardcoding sr of 0 for Disabled value")
df_expanded_params[(label+"_sr")] = df_expanded_params[(label+"_sr")].astype(int)
#df_expanded_contact = df_expanded_params[(label+"_sense_contacts")].str.split(pat = "-", expand = True)
#df_expanded_contact = df_expanded_contact.rename(columns={0: (label+"_sense_contact_an"), 1: (label+"_sense_contact_cath")})
#df_expanded_contact[(label+"_sense_contact_an")] = df_expanded_contact[(label+"_sense_contact_an")].str.replace('+', '', regex=True)
#df_expanded_contact[(label+"_sense_contact_an")] = df_expanded_contact[(label+"_sense_contact_an")].astype(int)
#df_expanded_contact[(label+"_sense_contact_cath")] = df_expanded_contact[(label+"_sense_contact_cath")].astype(int)
#df_expanded_params = df_expanded_params.drop([(label+"_sense_contacts")], axis = 1)
#df_expanded = pd.concat([df_expanded_contact, df_expanded_params], axis=1)
return df_expanded_params
def preprocess_elt(df, p):
"""
Parameters
----------
df : JSON to CSV converted event log table data
Returns
-------
df_rename : Restructured and reformatted elt data.
"""
if not "SessionId" in df:
sesh_id = os.path.basename(os.path.abspath(os.path.join(p, "../")))
sesh_id = float(re.findall(r'\d+', sesh_id)[0])
if df.empty:
df = pd.DataFrame([sesh_id], columns = ['session_id'])
return df
df_rename = df.rename(columns={"HostUnixTime": "timestamp_unix", "SessionId": "session_id", "EventType": "event_type", "EventSubType" : "event_subtype"})
df_subset = df_rename[["session_id", "timestamp_unix", "event_type", "event_subtype"]]
#Get comments from older version of RCS implementation
partial_match = ["Feeling", "Balance", "Slowness", "Dyskinesia", "Dystonia", "Rigidity", "Speech", "Tremor", "Mania", "Sleep"]
import math
indx = np.array([])
for i in range(len(df_subset)):
entry = df_subset.loc[i, 'event_type']
if type(entry) != str:
if math.isnan(entry):
continue
for j in range(len(partial_match)):
pm = partial_match[j]
if entry.startswith(pm):
indx = np.append(indx, i)
if indx.size > 0:
df_reformat = df_subset.iloc[indx, :]
df_reformat = df_reformat.rename(columns = {'event_type': 'conditions', 'event_subtype': 'extra_comments'})
df_standard = pd.melt(df_reformat, id_vars=['session_id', 'timestamp_unix'], value_vars = ['conditions', 'extra_comments'])
df_standard = df_standard.rename(columns = {'variable': 'event_type', 'value': 'event_subtype'})
df_subset = pd.concat([df_subset, df_standard])
ls_keep = ["conditions", "medication", "extra_comments"]
df_select = df_subset.loc[df_subset['event_type'].isin(ls_keep)]
if (df_select.size == 0):
df_standard = df_subset[["session_id", "timestamp_unix"]]
df_reformat = df_standard.iloc[0:1, :]
else:
dfp = df_select.pivot(columns = 'event_type')['event_subtype']
if (not "conditions" in dfp):
dfp = dfp.assign(conditions = np.nan)
if (not "medication" in dfp):
dfp = dfp.assign(medication = np.nan)
if (not "extra_comments" in dfp):
dfp = dfp.assign(extra_comments = np.nan)
df_reformat = df_select[["session_id", "timestamp_unix"]]
df_reformat = df_reformat.assign(medication = pd.Series(dfp['medication']))
df_reformat = df_reformat.assign(symptoms = pd.Series(dfp['conditions']))
df_reformat = df_reformat.assign(comments = pd.Series(dfp['extra_comments']))
return df_reformat
def preprocess_md(df, p):
"""
Parameters
----------
df : JSON to CSV converted meta data.
Returns
-------
df : Restructured and reformatted md.
"""
if (df['implant_side'] == 'Undefined').any():
implant_side = os.path.abspath(os.path.join(p, "../.."))
implant_side = implant_side[-1]
df.implant_side[df.implant_side=="Undefined"]=implant_side
else:
df_implant_expanded = df[("implant_side")].str.split(pat = " ", expand = True)
df["implant_side"] = df_implant_expanded.iloc[:,0]
df_rename = df.rename(columns={"subj_ID": "subj_id"})
df_rename['subj_id'] = df_rename['subj_id'][0][:-1]
return df_rename
def settings_combine(eltp, mdp, slsp, tdsp, out_dir):
"""
Parameters
----------
eltp : preprocessed event log table data.
mdp : preprocessed meta data.
slsp : preprocessed stim log settings data.
tdsp : preprocessed time domain settings data.
out_dir : fullpath to parent directory of output.
Returns
-------
df : a single dataframe containing all input data.
"""
subj_id = mdp['subj_id'].unique()
subj_id = subj_id[~pd.isnull(subj_id)][0]
hemi = mdp['implant_side'].unique()
hemi = hemi[~pd.isnull(hemi)][0]
sesh_id = eltp['session_id'].unique()
sesh_id = sesh_id[~pd.isnull(sesh_id)][0]
tdspc = tdsp.drop(['timestamp_unix_stop'], axis=1)
df = slsp.append(tdspc)
df.insert(0, 'subj_id', subj_id)
df.insert(1, 'implant_side', hemi)
df.insert(2, 'session_id', sesh_id)
df = df.sort_values('timestamp_unix')
df = df.reset_index(drop = True)
timestamp_dt = convert_unix2dt(df["timestamp_unix"])
df.insert(4, 'timestamp', timestamp_dt)
df[['timestamp_unix']] = df[['timestamp_unix']].astype(int)
df[['session_id']] = df[['session_id']].astype(int).astype(str)
df.to_csv(out_dir + 'combined_settings.csv', index = False, header=True)
eltp.to_csv(out_dir + 'session_notes.csv', index = False, header = True)
return df
def convert_unix2dt(series):
"""
Parameters
----------
series : column from pandas dataframe in UNIX microsecond formatting
Returns
-------
timestamp_dt : series in date-time format
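
    Notes
    -----
    datetime.fromtimestamp() interprets the UNIX time in the machine's local
    time zone, so the returned datetime64 values are local, not UTC.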
"""
if (len(series) == 1):
unix_s = series/1000
else:
unix_s = series.squeeze()/1000
timestamp_dt = np.zeros(len(unix_s), dtype='datetime64[ms]')
for i in range(len(timestamp_dt)):
timestamp_dt[i] = datetime.fromtimestamp(unix_s.iloc[i])
return timestamp_dt
def preprocess_settings(dir_name):
"""
Parameters
----------
    dir_name : full path of the parent directory to process, with data one level lower than the head directory.
Returns
-------
Meta-settings table of all individual session settings tables
"""
paths = get_filepaths(dir_name)
msc = pd.DataFrame() #initialize metadata table of settings information
meltp = pd.DataFrame()
p_temp = paths[0]
gp = os.path.abspath(os.path.join(p_temp, "../.."))
subj_id = os.path.basename(gp)
msc_fp = gp + '/' + subj_id + '_meta_combined_settings.csv'
meltp_fp = gp + '/' + subj_id + '_meta_session_notes.csv'
if (os.path.exists(msc_fp)):
msc = pd.read_csv(msc_fp, header=0)
        meltp = pd.read_csv(meltp_fp, header=0)
import pandas as pd
import re
import sys
import os
blast_file = sys.argv[1]
sample=sys.argv[2]
data = pd.read_table(blast_file, header=None)
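# Collect every unique (se1, ss2, se2, ss3) coordinate tuple from columns
# 9, 13, 14 and 18 of the BLAST table; each group of hits is then written to
# its own file under reads_group/ and its read counts are tallied per sample.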
all_se1_ss2_se2_ss3 = {(data[9][i], data[13][i], data[14][i], data[18][i]) for i in range(len(data))}
if not os.path.exists("reads_group"):
os.mkdir("reads_group")
for item in list(all_se1_ss2_se2_ss3):
data_tmp = data[(data[9] == item[0]) & (data[13] == item[1]) & (data[14] == item[2]) & (data[18] == item[3])]
data_tmp.to_csv("reads_group/" + str(item[0]) + "_" + str(item[1]) + "_" + str(item[2]) + "_" + str(item[3]) + "_" + blast_file, header=False, index=False, sep="\t")
repsize_1 = list(set(data_tmp[3]))
repsize_1.sort()
repsize_str_list_1 = [str(item) for item in repsize_1]
repsize_str_1 = ",".join(repsize_str_list_1)
repsize_2 = list(set(data_tmp[4]))
repsize_2.sort()
repsize_str_list_2 = [str(item) for item in repsize_2]
repsize_str_2 = ",".join(repsize_str_list_2)
with open("count_se1_ss2_se2_ss3_" + blast_file, "at") as f_count:
print("\t".join([str(item[0]), str(item[1]), str(item[2]), str(item[3]), str(len(data_tmp)), repsize_str_1, repsize_str_2, sample]), file=f_count)
data_count = pd.read_table("count_se1_ss2_se2_ss3_" + blast_file, header=None)
all_se1_ss2_se2_ss3 = {(data_count[0][i], data_count[1][i], data_count[2][i], data_count[3][i]) for i in range(len(data_count))}
all_list = list(all_se1_ss2_se2_ss3)
all_list.sort(key=lambda x:x[0])
df_count=pd.DataFrame(columns=["se1", "ss2", "se2", "ss3", sample], index=all_list)
for i in range(len(data_count)):
index_tuple = (data_count[0][i], data_count[1][i], data_count[2][i], data_count[3][i])
df_count[data_count[7][i]][index_tuple] = data_count[4][i]
df_count.fillna(0, inplace=True)
df_count.to_excel(sample + "_se1_ss2_se2_ss3_annotation.xlsx")
df_count = pd.read_excel(sample + "_se1_ss2_se2_ss3_annotation.xlsx")
df_count.rename(columns={"Unnamed: 0":"se1_ss2_se2_ss3"}, inplace=True)
data1 = pd.read_table("count_se1_ss2_se2_ss3_" + blast_file, header=None)
"""
boydsworld_scraper
A scraper module for boydsworld.com historical game results
Created by <NAME> in November 2021
"""
# Imports
import pandas as pd
import numpy as np
import requests
from io import StringIO
from datetime import date
import lxml
def get_games(school, start, end=None, vs="all", parse_dates=True):
"""
A function to scrape game results data from boydsworld.com.
Valid 1992 to 2021
Args:
        school (str): the school whose games to select
start (int): the start year of games
end (int): the end season of games.
vs (str): school to filter games against. default: 'all'
parse_dates (bool): whether to parse data into datetime64
Returns:
Dataframe of all games played for a given team inclusive of start & end.
data from boydsworld.com
"""
url = "http://www.boydsworld.com/cgi/scores.pl"
try:
df = (_get_data(school, start, end=end, vs=vs, parse_dates=parse_dates)
.pipe(_enrich_data, school)
.pipe(_set_dtypes)
.drop(columns=["team_1","team_1_score","team_2","team_2_score"])
.sort_values(by="date", axis=0, ascending=True)
)
return df
except:
print(f'''no records found for {school} between {start} and {end}''')
return pd.DataFrame()
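# Hypothetical usage sketch (the school name and years are placeholders):
#
#   games = get_games("Florida State", 2015, end=2017)
#   print(games.head())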
def _get_data(school, start, end=None, vs="all", parse_dates=True):
"""
A helper function to send GET request to boydsworld.com and parse data
"""
col_names = ["date", "team_1", "team_1_score", "team_2", "team_2_score", "field"]
url = 'http://www.boydsworld.com/cgi/scores.pl'
# if no end season give, obtain single-season results
if end is None:
end = start
try:
payload = {"team1":school, "firstyear":str(start), "team2":vs, \
"lastyear":str(end), "format":"HTML","submit":"Fetch"}
s = requests.Session()
r = requests.get(url, params=payload)
response = r.text
io = StringIO(response).read()
dfs = pd.read_html(io=io, parse_dates=parse_dates)
df = dfs[1].dropna(how="all", axis=1, inplace=False)
if len(df.columns) != len(col_names):
print("no records found")
return pd.DataFrame()
else:
df.columns = col_names
if parse_dates:
# make sure dates are parsed as type datetime64[ns]
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
return df
except:
return pd.DataFrame()
def _enrich_data(df, school):
"""
A helper function that adds the following columns to a given DataFrame:
        opponent (str): opponent for each game.
        runs_scored (int): the number of runs scored by `school` in each game
        runs_allowed (int): the number of runs allowed by `school` in each game
        run_difference (int): the difference between `school`'s runs scored
            and runs allowed for each game
"""
wins = df[(df["team_1"] == school) & \
(df["team_1_score"] > df["team_2_score"])].copy()
losses = df[(df["team_2"] == school) & \
(df["team_1_score"] > df["team_2_score"])].copy()
# set for wins
wins.loc[:,"runs_scored"] = wins.loc[:,"team_1_score"]
wins.loc[:,"runs_allowed"] = wins.loc[:,"team_2_score"]
wins.loc[:,"opponent"] = wins.loc[:,"team_2"]
# set for losses
losses.loc[:,"runs_scored"] = losses.loc[:,"team_2_score"]
losses.loc[:,"runs_allowed"] = losses.loc[:,"team_1_score"]
losses.loc[:,"opponent"] = losses.loc[:,"team_1"]
# combine dfs
    df = pd.concat([wins, losses])
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categoricals with the same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
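# With fastpath construction a label of -1 marks a missing value, so
# isnull() should flag exactly those positions.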
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# the unicode option should not affect Categorical, as it doesn't care
# about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
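# Assigning to .categories renames the categories in place (the codes are
# kept), so the new list must have exactly the same length as the old one.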
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
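# set_categories can add, remove and reorder categories in one step; values
# whose category is dropped get code -1 (NaN), e.g.
#   Categorical(list("abca")).set_categories(["a"]).codes -> [0, -1, -1, 0]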
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
# all "pointers" to '4' must be changed from 3 to 0, ...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
# categories are now in new order
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
# output is the same
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
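# rename_categories maps old categories to new ones positionally and never
# touches the codes, so the replacement list must have the same length.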
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# adding a category that is already present raises
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removing a category that is not present raises
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
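# Missing values are normally not a category: they live purely in the codes
# as -1.  The deprecated alternative of NaN *as* a category gives that NaN
# an ordinary positive code, which is what the FutureWarning blocks below
# exercise.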
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
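# .codes is exposed read-only: assigning a new array to it, or writing into
# the returned array, should raise, while the Categorical itself remains
# writeable.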
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
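# min()/max() follow the categories order (not the lexical order of the
# values) and require ordered=True; numeric_only=True skips NaN entries.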
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
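# unique() returns the values in order of appearance and drops unused
# categories; an ordered categorical keeps its original categories order,
# an unordered one gets its categories reordered to appearance order.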
def test_unique(self):
# categories are reordered to the order of appearance when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan is kept in the values but must not become a category
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
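# memory_usage() should match nbytes; with object categories, deep=True
# additionally counts the string payloads and is therefore larger.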
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
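# searchsorted on a Categorical should mirror Series.searchsorted: it
# returns an array of insertion points even for a scalar key.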
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
# eggs after donuts, after switching milk and donuts
exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following: comparisons with scalars not in the categories should
# raise for ordering comparisons (< and >), but not for == / !=
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
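# The codes dtype scales with the number of categories (int8 -> int16 ->
# int32 ...) and should shrink back when enough categories are removed.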
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
# doing this breaks transform
x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
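# Series(values, dtype='category') should be equivalent to building the
# Series first and then calling .astype('category').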
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# NaNs are represented as -1 in the codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# Calling set_categories on the Series itself (instead of via .cat) is a
# likely mistake, so test that it raises an error on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
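# longer categoricals are truncated with '...' and get an explicit Length line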
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
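# a longer Series truncates the categories listing with '...'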
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
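# a longer CategoricalIndex truncates the categories listing with '...'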
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test reprs across a range of lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
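# every value occurs exactly once, so the mode is empty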
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
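# the unused category 'd' should still appear in the counts with a value of 0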
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
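# a column created with pd.cut is categorical; row and slice indexing should preserve its values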
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Categorical to parts of an int/... column uses the values of
# the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(
pd.Categorical(["b", "a"],
categories=["a", "b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# ordering comparisons should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# (and the following comments): comparisons with scalars not in the
# categories should raise for ordering comps, but not for ==/!=
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df, df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
pd.concat([df, df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
# make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
def f():
pd.concat([s, s2])
self.assertRaises(ValueError, f)
result = pd.concat([s, s], ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s, s])
expected = Series(
list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
tm.assert_series_equal(result, expected)
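# DataFrame concat should also keep the categorical dtype and its category order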
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))})
tm.assert_frame_equal(result, expected)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'cab'))}).set_index('B')
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list(
'cab'))}).set_index('B')
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'abc'))}).set_index('B')
self.assertRaises(TypeError, lambda: pd.concat([df2, df3]))
def test_append(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = df.append(df)
tm.assert_frame_equal(exp, res)
# Append should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
df.append(df_wrong_categories)
self.assertRaises(ValueError, f)
def test_merge(self):
# GH 9426
right = DataFrame({'c': {0: 'a',
1: 'b',
2: 'c',
3: 'd',
4: 'e'},
'd': {0: 'null',
1: 'null',
2: 'null',
3: 'null',
4: 'null'}})
left = DataFrame({'a': {0: 'f',
1: 'f',
2: 'f',
3: 'f',
4: 'f'},
'b': {0: 'g',
1: 'g',
2: 'g',
3: 'g',
4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def test_repeat(self):
# GH10183
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
res = cat.repeat(2)
self.assert_categorical_equal(res, exp)
def test_na_actions(self):
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = pd.DataFrame({"cats": cat2, "vals": vals2})
cat3 = pd.Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = pd.DataFrame({"cats": cat3, "vals": vals3})
cat4 = pd.Categorical([1, 2], categories=[1, 2, 3])
vals4 = ["a", "b"]
df_exp_drop_all = pd.DataFrame({"cats": cat4, "vals": vals4})
# fillna
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
def f():
df.fillna(value={"cats": 4, "vals": "c"})
self.assertRaises(ValueError, f)
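# forward fill happens to produce the same frame as the explicit fill above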
res = df.fillna(method='pad')
tm.assert_frame_equal(res, df_exp_fill)
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes both missing values and NA categories
# into account
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
df = pd.DataFrame({"cats": c, "vals": [1, 2, 3]})
df_exp = pd.DataFrame({"cats": Categorical(["a", "b", "a"]),
"vals": [1, 2, 3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
def test_astype_to_other(self):
s = self.cat['value_group']
expected = s
tm.assert_series_equal(s.astype('category'), expected)
tm.assert_series_equal(s.astype(com.CategoricalDtype()), expected)
self.assertRaises(ValueError, lambda: s.astype('float64'))
cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_series_equal(cat.astype('str'), exp)
s2 = Series(Categorical.from_array(['1', '2', '3', '4']))
exp2 = Series([1, 2, 3, 4]).astype(int)
tm.assert_series_equal(s2.astype('int'), exp2)
# objects don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(
np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(s.values), name='value_group')
cmp(s.astype('object'), expected)
cmp(s.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(s), np.array(s.values))
# valid conversion
for valid in [lambda x: x.astype('category'),
lambda x: x.astype(com.CategoricalDtype()),
lambda x: x.astype('object').astype('category'),
lambda x: x.astype('object').astype(
com.CategoricalDtype())
]:
result = valid(s)
tm.assert_series_equal(result, s)
# invalid conversion (these are NOT a dtype)
for invalid in [lambda x: x.astype(pd.Categorical),
lambda x: x.astype('object').astype(pd.Categorical)]:
self.assertRaises(TypeError, lambda: invalid(s))
def test_astype_categorical(self):
cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_categorical_equal(cat, cat.astype('category'))
tm.assert_almost_equal(np.array(cat), cat.astype('object'))
self.assertRaises(ValueError, lambda: cat.astype(float))
def test_to_records(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
# numeric ops should not succeed
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
self.assertRaises(TypeError,
lambda: getattr(self.cat, op)(self.cat))
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = self.cat['value_group']
for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
self.assertRaises(TypeError,
lambda: getattr(s, op)(numeric_only=False))
# mad technically works because it always takes the numeric data
# numpy ops
s = pd.Series(pd.Categorical([1, 2, 3, 4]))
self.assertRaises(TypeError, lambda: np.sum(s))
# numeric ops on a Series
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
self.assertRaises(TypeError, lambda: getattr(s, op)(2))
# invalid ufunc
self.assertRaises(TypeError, lambda: np.log(s))
def test_cat_tab_completition(self):
# test the tab completion display
ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',
'add_categories', 'remove_categories',
'rename_categories', 'reorder_categories',
'remove_unused_categories', 'as_ordered', 'as_unordered']
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith('_')]
return list(sorted(set(results)))
s = Series(list('aabbcde')).astype('category')
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.categorical import CategoricalAccessor
self.assertIs(Series.cat, CategoricalAccessor)
s = Series(list('aabbcde')).astype('category')
self.assertIsInstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with tm.assertRaisesRegexp(AttributeError, "only use .cat accessor"):
invalid.cat
self.assertFalse(hasattr(invalid, 'cat'))
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pydata/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
# https://github.com/pydata/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
c = s.astype('category')
self.assertIsInstance(c.str, StringMethods)
# str functions, which need special arguments
special_func_defs = [
('cat', (list("zyxw"),), {"sep": ","}),
('center', (10,), {}),
('contains', ("a",), {}),
('count', ("a",), {}),
('decode', ("UTF-8",), {}),
('encode', ("UTF-8",), {}),
('endswith', ("a",), {}),
('extract', ("([a-z]*) ",), {}),
('find', ("a",), {}),
('findall', ("a",), {}),
('index', (" ",), {}),
('ljust', (10,), {}),
('match', ("a"), {}), # deprecated...
('normalize', ("NFC",), {}),
('pad', (10,), {}),
('partition', (" ",), {"expand": False}), # not default
('partition', (" ",), {"expand": True}), # default
('repeat', (3,), {}),
('replace', ("a", "z"), {}),
('rfind', ("a",), {}),
('rindex', (" ",), {}),
('rjust', (10,), {}),
('rpartition', (" ",), {"expand": False}), # not default
('rpartition', (" ",), {"expand": True}), # default
('slice', (0, 1), {}),
('slice_replace', (0, 1, "z"), {}),
('split', (" ",), {"expand": False}), # default
('split', (" ",), {"expand": True}), # not default
('startswith', ("a",), {}),
('wrap', (2,), {}),
('zfill', (10,), {})
]
_special_func_names = [f[0] for f in special_func_defs]
# * get, join: they need individual elements of type list, but
# we can't make a categorical with lists as individual categories.
# -> `s.str.split(" ").astype("category")` will error!
# * `translate` has different interfaces for py2 vs. py3
_ignore_names = ["get", "join", "translate"]
str_func_names = [f
for f in dir(s.str)
if not (f.startswith("_") or f in _special_func_names
or f in _ignore_names)]
func_defs = [(f, (), {}) for f in str_func_names]
func_defs.extend(special_func_defs)
for func, args, kwargs in func_defs:
res = getattr(c.str, func)(*args, **kwargs)
exp = getattr(s.str, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
else:
tm.assert_series_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assertRaisesRegexp(AttributeError,
"Can only use .str accessor with string"):
invalid.str
self.assertFalse(hasattr(invalid, 'str'))
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pydata/pandas/issues/10661
from pandas.tseries.common import Properties
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.period import period_range, PeriodIndex
from pandas.tseries.tdi import timedelta_range, TimedeltaIndex
s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series(timedelta_range('1 days', '10 days'))
c_tdr = s_tdr.astype("category")
test_data = [
("Datetime", DatetimeIndex._datetimelike_ops, s_dr, c_dr),
("Period", PeriodIndex._datetimelike_ops, s_pr, c_pr),
("Timedelta", TimedeltaIndex._datetimelike_ops, s_tdr, c_tdr)]
self.assertIsInstance(c_dr.dt, Properties)
special_func_defs = [
('strftime', ("%Y-%m-%d",), {}),
('tz_convert', ("EST",), {}),
('round', ("D",), {}),
('floor', ("D",), {}),
('ceil', ("D",), {}),
# ('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
_ignore_names = ['tz_localize']
for name, attr_names, s, c in test_data:
func_names = [f
for f in dir(s.dt)
if not (f.startswith("_") or f in attr_names or f in
_special_func_names or f in _ignore_names)]
func_defs = [(f, (), {}) for f in func_names]
for f_def in special_func_defs:
if f_def[0] in dir(s.dt):
func_defs.append(f_def)
for func, args, kwargs in func_defs:
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
for attr in attr_names:
try:
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
except Exception as e:
print(name, attr)
raise e
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assertRaisesRegexp(
AttributeError, "Can only use .dt accessor with datetimelike"):
invalid.dt
self.assertFalse(hasattr(invalid, 'dt'))
def test_pickle_v0_14_1(self):
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
# ordered -> _ordered
# GH 9347
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
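# NOTE: the original snippet is truncated here. The lines below are a hedged
# reconstruction that mirrors the v0.14.1 test above; the pickle filename
# 'categorical_0_15_2.pickle' is an assumption.
cat = pd.Categorical(values=['a', 'b', 'c'],
                     categories=['a', 'b', 'c', 'd'],
                     name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
                           'categorical_0_15_2.pickle')  # assumed filename
self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))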
# @Date: 2019-08-16T23:31:03+08:00
# @Email: <EMAIL>
# @Filename: MMCIF_unit.py
# @Last modified time: 2019-08-21T16:02:36+08:00
import pandas as pd
import numpy as np
import os, re, time, requests, sys
from urllib import request, error
from retrying import retry
from multiprocessing.dummy import Pool
from bs4 import BeautifulSoup
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
sys.path.append('./')
from Unit import Unit
class MMCIF_unit(Unit):
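# Pipeline unit that parses mmCIF structure files (via Bio.PDB's MMCIF2Dict)
# and derives per-chain tables of chain type, SEQRES/ATOM residues, ligand
# contacts, modifications and missing residues for the mapping pipeline.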
CONFIG = {
'PDB_ID': 'pdb_id',
'MMCIF_OLD_FOLDER': ['/data1/suntt/process0606/cgc_mmcif_file/', '/data1/suntt/CanDriver/Data/PDB_cgc/cgc_mmcif_file/', '/data1/suntt/CanDriver/Data/PDB_NEW/mmcif_file/'],
'MMCIF_FOLDER': '/home/zzf/Work/SIFTS_Plus_Muta_Maps/data/mmcif_file/',
'OUTPUT_FOLDER': '../../data/Mapping_Pipeline/output_files/',
'HEADERS': {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134'},
'CHAIN_TYPE_FILE_LIST': ('cgc_pdb_chain_type.txt', 'cgc_pdb_chain_type_extra', 'cgc_chain_type_error_pdb.txt', 'output/cgc_pdb_chain_type_all_new.txt'),
'ATOM_SEQRES_FILE_LIST': ('cgc_pdb_atom_seqres.txt', 'cgc_seqres_atom_error_pdb.txt', 'output/cgc_pdb_atom_seqres_add_chain_type_new.txt', 'output/cgc_pdb_coordinates_site_range.txt'),
'LIGAND_FILE_LIST': ('ligand_info0605.txt', 'ligand_info_extra0605.txt', 'ligand_info_error.txt', 'output/ligand_info_final1.txt', 'output/ligand_info_final2.txt'),
'CHAIN_AND_SEQRES_FILE_LIST': ('output/cgc_pdb_atom_seqres_protein_chain_info.txt', 'cgc_protein_chain_id_in_pdb.txt', 'output/cgc_pdb_atom_seqres_info_integration.txt'),
'ADD_MODIFICATION_FILE': 'output/cgc_pdb_atom_seqres_info_integration_new.txt',
'ADD_MISSING_FILE': ('output/cgc_information_statistics1.txt', 'output/cgc_pdb_atom_seqres_info_integration_new_add_coor_site.txt'),
'PDB_MUTATION_FILE': 'output/pdb_mutation_info.txt',
'RESOLUTION_FILE': ('output/resoluton_error.txt', 'output/pdb_resoluton_info.txt'),
'YEAR_INFO_1_FILE': ('../../data/Mapping_Pipeline/output_files/pdb_date_info_newest.txt', '../../data/Mapping_Pipeline/output_files/pdb_date_error_newest.txt'),
'YEAR_INFO_2_FILE': ('../../data/Mapping_Pipeline/output_files/pdb_date_supp_newest.txt', '../../data/Mapping_Pipeline/output_files/pdb_date_error_supp_newest.txt'),
'YEAR_INFO_ALL': '../../data/Mapping_Pipeline/output_files/pdb_date_info_newest_all.txt',
'FINAL_FILE': ('output/cgc_pdb_atom_seqres_info_integration_final.txt', 'PDB_cgc/output/cgc_pdb_atom_seqres_info_integration_final0614.txt'),
'MMICF_USECOLS': ['pdb_id', 'chain_id', 'seqres_len', 'coordinates_len', 'Modification_position', 'ligand_position_in_seqres', 'mis_range', 'mis_index', 'Modification_num', 'mutation_num'],
}
def set_output_folder(self, path):
self.CONFIG['OUTPUT_FOLDER'] = path
def download_cif_file(pdbId, path):
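# Download a single mmCIF file from RCSB and write it to `path`.
# Note: defined without `self` and called through the class
# (MMCIF_unit.download_cif_file(...)), so it effectively acts as a static method.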
url = 'https://files.rcsb.org/view/%s.cif' % pdbId
html = request.urlopen(url).read()
html = html.decode('utf-8')
with open(path, 'w') as fw:
fw.write(html)
time.sleep(2)
def get_mmcif_file_path(self, pdbId, download=False):
print('get_mmcif_file_path(): Working on [%s]' % pdbId)
new_path = '%s%s.cif' % (self.CONFIG['MMCIF_FOLDER'], pdbId)
for path in self.CONFIG['MMCIF_OLD_FOLDER']:
old_path = '%s%s.cif' % (path, pdbId)
if os.path.exists(old_path):
return old_path
if os.path.exists(new_path):
return new_path
else:
if download:
MMCIF_unit.download_cif_file(pdbId, new_path)
return False
else:
return new_path
'''
def download_mmcif_file(self):
# Not used for now (zzf)
@retry(stop_max_attempt_number=3, wait_fixed=1000)
pool = Pool(processes=20)
pool.map(download_cif_file, self.pdb_list)
'''
def extract_chain_type_info(self):
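# For each PDB id, read '_entity_poly.type' and '_entity_poly.pdbx_strand_id'
# from the mmCIF dictionary, then merge the regular and fallback outputs into a
# single per-chain chain-type table (protein / DNA / RNA / RNA+DNA).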
chain_type_file1, chain_type_file2, chain_type_error, chain_type_file_all = MMCIF_unit.CONFIG['CHAIN_TYPE_FILE_LIST']
outpath = self.CONFIG['OUTPUT_FOLDER']
demo_dict_df_list = []
fw = open(outpath + chain_type_file2, 'w')
error_pdb_file = open(outpath + chain_type_error, 'w')
for pdbId in self.pdb_list:
# pdbFileSavePath = '%s%s.cif' % (MMCIF_unit.CONFIG['MMCIF_FOLDER'], pdbId)
pdbFileSavePath = self.get_mmcif_file_path(pdbId, True)
if not pdbFileSavePath:
continue
mmcif_dict = MMCIF2Dict(pdbFileSavePath)
demo_dict = {}
index = ['_entity_poly.type', '_entity_poly.pdbx_strand_id']
try:
for i in index:
demo_dict[i] = mmcif_dict[i]
df = pd.DataFrame(demo_dict)
df['pdb_id'] = pdbId
demo_dict_df_list.append(df)
except:
try:
fw.write('%s\t%s\t%s\n' % (
pdbId, mmcif_dict['_entity_poly.type'], mmcif_dict['_entity_poly.pdbx_strand_id']))
except:
error_pdb_file.write(pdbId + '\n')
demo_df = pd.concat(demo_dict_df_list)
demo_df.to_csv(outpath + chain_type_file1, sep='\t', index=False)
fw.close()
error_pdb_file.close()
# Merge the chain_type information into one table
info = pd.read_csv(outpath + chain_type_file1, sep='\t', dtype=str)
info1 = pd.read_csv(outpath + chain_type_file2, sep='\t', dtype=str,
names=['pdb_id', '_entity_poly.type', '_entity_poly.pdbx_strand_id'])
info2 = pd.concat([info, info1], axis=0)
info2.rename(columns={'_entity_poly.pdbx_strand_id': 'chain_id', '_entity_poly.type': 'chain_type_details'},
inplace=True)
info2['chain_type'] = info2['chain_type_details'].replace('polypeptide(L)', 'protein').replace('polypeptide(D)',
'protein').replace(
'polydeoxyribonucleotide', 'DNA').replace('polyribonucleotide', 'RNA').replace(
'polydeoxyribonucleotide/polyribonucleotide hybrid', 'RNA+DNA')
# info2.to_csv(outpath+'PDB_cgc/cgc_pdb_chain_type_all.txt',sep='\t',index=False)
# Reset the index so the same index value no longer maps to different rows (after concat, each input keeps its original index)
info2.index = range(len(info2))
# With a fresh, unambiguous index, the faster split/stack approach below can expand comma-separated chain ids into one row per chain
result = info2.drop('chain_id', axis=1).join(
info2['chain_id'].str.split(',', expand=True).stack().reset_index(level=1, drop=True).rename('chain_id_new'))
info3 = result[['pdb_id', 'chain_type']].drop_duplicates()
info4 = info3.sort_values(by=['chain_type']).groupby(['pdb_id'], as_index=False).agg(lambda x: ','.join(x))
info4.rename(columns={'chain_type': 'pdb_contain_chain_type'}, inplace=True)
info5 = pd.merge(result, info4, on=['pdb_id'], how='left')
info5.to_csv(outpath + chain_type_file_all, sep='\t', index=False)
def extract_seqres_and_atom_info(self):
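# Extract the '_pdbx_poly_seq_scheme.*' records (SEQRES vs. resolved residues,
# numbering and insertion codes), attach the chain-type table, and write the
# per-residue table plus per-chain coordinate position ranges.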
atom_seqres_file, atom_seqres_error, atom_seqres_chain_type_oringnal, coordinates_file = MMCIF_unit.CONFIG['ATOM_SEQRES_FILE_LIST']
outpath = self.CONFIG['OUTPUT_FOLDER']
chain_type_file_all = MMCIF_unit.CONFIG['CHAIN_TYPE_FILE_LIST'][3]
demo_dict_df_list = []
error_pdb_file = open(outpath + atom_seqres_error, 'w')
for pdbId in self.pdb_list:
# pdbFileSavePath = '%s%s.cif' % (MMCIF_unit.CONFIG['MMCIF_FOLDER'], pdbId)
pdbFileSavePath = self.get_mmcif_file_path(pdbId)
if not pdbFileSavePath:
continue
mmcif_dict = MMCIF2Dict(pdbFileSavePath)
demo_dict = {}
index = ['_pdbx_poly_seq_scheme.mon_id', '_pdbx_poly_seq_scheme.ndb_seq_num',
'_pdbx_poly_seq_scheme.pdb_seq_num', '_pdbx_poly_seq_scheme.auth_seq_num',
'_pdbx_poly_seq_scheme.pdb_mon_id', '_pdbx_poly_seq_scheme.auth_mon_id',
'_pdbx_poly_seq_scheme.pdb_strand_id', '_pdbx_poly_seq_scheme.pdb_ins_code']
try:
for i in index:
demo_dict[i] = mmcif_dict[i]
df = pd.DataFrame(demo_dict)
df['pdb_id'] = pdbId
demo_dict_df_list.append(df)
except:
error_pdb_file.write(pdbId + '\n')
demo_df1 = pd.concat(demo_dict_df_list)
demo_df1.to_csv(outpath + atom_seqres_file, sep='\t', index=False)
error_pdb_file.close()
# Add the chain_type information to the seqres and atom records
file1 = pd.read_csv(outpath + atom_seqres_file, sep='\t', dtype=str)
file2 = pd.read_csv(outpath + chain_type_file_all, sep='\t', dtype=str)
file2.rename(columns={'chain_id_new': 'chain_id'}, inplace=True) # ?
file3 = pd.merge(file1, file2, left_on=['pdb_id', '_pdbx_poly_seq_scheme.pdb_strand_id'],
right_on=['pdb_id', 'chain_id'], how='left')
# file3.to_csv(outpath+'PDB_cgc/cgc_pdb_atom_seqres_add_chain_type.txt',sep='\t',index=False)
file3.rename(columns={'_pdbx_poly_seq_scheme.mon_id': 'SEQRES', '_pdbx_poly_seq_scheme.pdb_mon_id': 'Coordinates',
'_pdbx_poly_seq_scheme.ndb_seq_num': 'pdb_index',
'_pdbx_poly_seq_scheme.pdb_seq_num': 'position_in_seqres',
'_pdbx_poly_seq_scheme.auth_seq_num': 'position_in_coordinates',
'_pdbx_poly_seq_scheme.pdb_ins_code': 'inside_code'}, inplace=True)
file4 = file3.drop(['_pdbx_poly_seq_scheme.auth_mon_id', '_pdbx_poly_seq_scheme.pdb_strand_id'], axis=1)
file4.to_csv(outpath + atom_seqres_chain_type_oringnal, sep='\t', index=False)
# Add coordinates_start and coordinates_end information
coordinates_range = file4[file4['pdb_contain_chain_type'].notna() & file4['pdb_contain_chain_type'].str.contains('protein')]
coordinates_range['pdb_ins_position'] = coordinates_range['position_in_seqres'] + coordinates_range['inside_code']
coordinates_range['pdb_ins_position'] = coordinates_range['pdb_ins_position'].str.replace('.', '')
coordinates_range1 = coordinates_range.groupby(['pdb_id', 'chain_id'], as_index=False)['pdb_ins_position'].agg(
lambda x: ';'.join(x))
coordinates_range1.to_csv(outpath + coordinates_file, sep='\t', index=False)
def extract_pdb_ligand_info(self):
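# Collect '_struct_conn' connection records, keep metal coordination ('metalc')
# contacts involving known metal ligands, and aggregate ligand composition,
# contact positions and a metal flag per (pdb_id, chain_id).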
outpath = self.CONFIG['OUTPUT_FOLDER']
ligand_file1, ligand_file2, ligand_file_error, ligand_file_final1, ligand_file_final2 = MMCIF_unit.CONFIG['LIGAND_FILE_LIST']
atom_seqres_chain_type_oringnal = MMCIF_unit.CONFIG['ATOM_SEQRES_FILE_LIST'][2]
demo_dict_df_list = []
fw = open(outpath + ligand_file2,'w')
fp = open(outpath + ligand_file_error,'w')
for pdbId in self.pdb_list:
# pdbFileSavePath = '%s%s.cif' % (MMCIF_unit.CONFIG['MMCIF_FOLDER'], pdbId)
pdbFileSavePath = self.get_mmcif_file_path(pdbId)
if not pdbFileSavePath:
continue
mmcif_dict = MMCIF2Dict(pdbFileSavePath)
demo_dict = {}
index = ['_struct_conn.conn_type_id','_struct_conn.ptnr1_auth_asym_id','_struct_conn.ptnr1_auth_comp_id','_struct_conn.ptnr1_auth_seq_id',
'_struct_conn.ptnr2_auth_asym_id','_struct_conn.ptnr2_auth_comp_id','_struct_conn.ptnr2_auth_seq_id']
try:
for i in index:
demo_dict[i] = mmcif_dict[i]
df = pd.DataFrame(demo_dict)
df['pdb_id'] = pdbId
demo_dict_df_list.append(df)
except:
try:
fw.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n'%(pdbId,mmcif_dict['_struct_conn.conn_type_id'],mmcif_dict['_struct_conn.ptnr1_auth_asym_id'],mmcif_dict['_struct_conn.ptnr1_auth_comp_id'],
mmcif_dict['_struct_conn.ptnr1_auth_seq_id'],mmcif_dict['_struct_conn.ptnr2_auth_asym_id'],mmcif_dict['_struct_conn.ptnr2_auth_comp_id'],
mmcif_dict['_struct_conn.ptnr2_auth_seq_id']))
except:
fp.write(pdbId+'\n')
demo_df = pd.concat(demo_dict_df_list)
demo_df.to_csv(outpath + ligand_file1,sep='\t',index=False)
fw.close()
fp.close()
def ligand_count(ligand_ptnr_seq_id):
a = len(ligand_ptnr_seq_id.split(';'))
return a
def metal_check(connection_type):
if pd.isnull(connection_type):
return '0'
else:
if connection_type == 'metalc':
return '1'
else:
return '0'
# atom_seqres_chain_type_oringnal = 'output/cgc_pdb_atom_seqres_add_chain_type_new.txt'
ligand_info1 = pd.read_csv(outpath + ligand_file1, sep='\t', dtype=str, keep_default_na=False)
ligand_info2 = pd.read_csv(outpath + ligand_file2, sep='\t', dtype=str, keep_default_na=False,
names=['pdb_id', '_struct_conn.conn_type_id', '_struct_conn.ptnr1_auth_asym_id',
'_struct_conn.ptnr1_auth_comp_id', '_struct_conn.ptnr1_auth_seq_id',
'_struct_conn.ptnr2_auth_asym_id', '_struct_conn.ptnr2_auth_comp_id',
'_struct_conn.ptnr2_auth_seq_id'])
ligand_info_all = pd.concat([ligand_info1, ligand_info2], axis=0)
metal_ligand = ['ZN', 'MG', 'CA', 'FE', 'NA', 'MN', 'K', 'NI', 'CU', 'CO', 'CD', 'HG', 'PT', 'MO', 'BE', 'AL', 'BA',
'RU', 'SR', 'V', 'CS', 'W', 'AU', 'YB', 'LI', 'GD', 'PB', 'Y', 'TL', 'IR', 'RB', 'SM', 'AG',
'OS', 'PR', 'PD', 'EU', 'RH', 'RE', 'TB', 'TA', 'LU', 'HO', 'CR', 'GA', 'LA', 'SN', 'SB', 'CE',
'ZR',
'ER', 'TH', 'TI', 'IN', 'HF', 'SC', 'DY', 'BI', 'PA', 'PU', 'AM', 'CM', 'CF', 'GE', 'NB', 'TC',
'ND',
'PM', 'TM', 'PO', 'FR', 'RA', 'AC', 'NP', 'BK', 'ES', 'FM', 'MD', 'NO', 'LR', 'RF', 'DB', 'SG']
ligand_info_all1 = ligand_info_all[(ligand_info_all['_struct_conn.conn_type_id'] == 'metalc') & (
ligand_info_all['_struct_conn.ptnr1_auth_comp_id'].isin(metal_ligand))]
ligand_info_all1.rename(
columns={'_struct_conn.conn_type_id': 'connection_type', '_struct_conn.ptnr1_auth_asym_id': 'ligand_chain',
'_struct_conn.ptnr1_auth_comp_id': 'ligand_comp',
'_struct_conn.ptnr1_auth_seq_id': 'ligand_seq_id', '_struct_conn.ptnr2_auth_asym_id': 'chain_id',
'_struct_conn.ptnr2_auth_comp_id': 'ligand_ptnr_comp',
'_struct_conn.ptnr2_auth_seq_id': 'position_in_seqres'}, inplace=True)
ligand_info_all2 = ligand_info_all[(ligand_info_all['_struct_conn.conn_type_id'] == 'metalc') & (
ligand_info_all['_struct_conn.ptnr2_auth_comp_id'].isin(metal_ligand))]
ligand_info_all2.rename(
columns={'_struct_conn.conn_type_id': 'connection_type', '_struct_conn.ptnr2_auth_asym_id': 'ligand_chain',
'_struct_conn.ptnr2_auth_comp_id': 'ligand_comp',
'_struct_conn.ptnr2_auth_seq_id': 'ligand_seq_id', '_struct_conn.ptnr1_auth_asym_id': 'chain_id',
'_struct_conn.ptnr1_auth_comp_id': 'ligand_ptnr_comp',
'_struct_conn.ptnr1_auth_seq_id': 'position_in_seqres'}, inplace=True)
ligand_info_all3 = pd.concat([ligand_info_all1, ligand_info_all2], axis=0)
ligand_info_all3.reset_index(drop=True)
ligand_info_all3['ismetal'] = ligand_info_all3.apply(lambda x: metal_check(x.connection_type), axis=1)
ligand_info_all4 = ligand_info_all3.drop(['ligand_chain', 'ligand_seq_id'], axis=1)
ligand_info_all4 = ligand_info_all4.drop_duplicates()
data_index_position = pd.read_csv(outpath + atom_seqres_chain_type_oringnal, sep='\t', dtype=str)
data_index_position2 = data_index_position[
['pdb_id', 'chain_id', 'pdb_index', 'position_in_seqres']].drop_duplicates()
infomerge = pd.merge(ligand_info_all4, data_index_position2, how='left',
on=['pdb_id', 'chain_id', 'position_in_seqres'])
infomerge1 = infomerge[infomerge['pdb_index'].notna()]
infomerge1.to_csv(outpath + ligand_file_final1, sep='\t', index=False)
infomerge2 = infomerge1.groupby(['pdb_id', 'chain_id'], as_index=False)[
'ligand_comp', 'ligand_ptnr_comp', 'position_in_seqres', 'pdb_index', 'ismetal'].agg(lambda x: ';'.join(x))
infomerge2['ligand_count'] = infomerge2.apply(lambda x: ligand_count(x.position_in_seqres), axis=1)
infomerge2.to_csv(outpath + ligand_file_final2, sep='\t', index=False)
def deal_with_chain_and_seqres_atom(self):
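# Convert three-letter residue codes to one-letter codes, collapse SEQRES and
# Coordinates into one string per chain (protein and non-protein chains handled
# separately), and record modified-residue ('X') positions and counts.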
def get_modification(m, k):
m1 = str(m).replace('?', '')
yes = [i + 1 for i, v in enumerate(m1) if v == 'X']
if yes != []:
length = int(len(yes))
if k == '1':
yes1 = str(yes).replace('[', '').replace(']', '').replace(' ','')
return yes1
elif k == '2':
return length
def get_modification_seqres_index(m):
# print m
yes = [i + 1 for i, v in enumerate(m) if v == 'X']
if yes != []:
yes1 = str(yes).replace('[', '').replace(']', '').replace(' ','')
return yes1
atom_seqres_protein_chain, cgc_protein_chain_id, integration_file = MMCIF_unit.CONFIG['CHAIN_AND_SEQRES_FILE_LIST']
outpath = self.CONFIG['OUTPUT_FOLDER']
atom_seqres_chain_type_oringnal = MMCIF_unit.CONFIG['ATOM_SEQRES_FILE_LIST'][2]
multiToOne = MMCIF_unit.MultiToOne()
f = pd.read_csv(outpath + atom_seqres_chain_type_oringnal, sep='\t', dtype=str)
f['SEQRES'] = f.apply(lambda x: multiToOne.multi_letter_convert_to_one_letter(x.SEQRES), axis=1)
f['Coordinates'] = f.apply(lambda x: multiToOne.multi_letter_convert_to_one_letter(x.Coordinates), axis=1)
## The following applies to protein chains only
f1 = f[f['chain_type'] == 'protein']# .reset_index(drop=True)
f1.to_csv(outpath + atom_seqres_protein_chain, sep='\t', index=False)
# Collapse the SEQRES information into a single row per chain
f2 = f1[['pdb_id', 'chain_id', 'SEQRES', 'inside_code']] # do not drop duplicates here
f3 = f2.groupby(['pdb_id', 'chain_id'], as_index=False)['SEQRES'].agg(lambda x: ''.join(x))
f3['seqres_len'] = f3['SEQRES'].str.len()
# Collapse the coordinates information into a single row per chain
f4 = f1[['pdb_id', 'chain_id', 'Coordinates', 'inside_code']] # do not drop duplicates here
f5 = f4.groupby(['pdb_id', 'chain_id'], as_index=False)['Coordinates'].agg(lambda x: ''.join(x))
f5['coordinates_len'] = f5['Coordinates'].str.replace('?', '').str.len()
# Merge the two parts
f6 = pd.merge(f3, f5, on=['pdb_id', 'chain_id'], how='left')
# Extract the chain_id of every protein chain
allchain = f6[['pdb_id', 'chain_id']].drop_duplicates()
allchain1 = allchain.sort_values(by=['chain_id']).groupby(['pdb_id'], as_index=False).agg(lambda x: ','.join(x))
allchain1.rename(columns={'chain_id': 'pdb_protein_chain_id'}, inplace=True)
allchain1.to_csv(outpath + cgc_protein_chain_id, sep='\t', index=False)
## The following applies to non-protein chains
ff1 = f[f['chain_type'] != 'protein']# .reset_index(drop=True)
if len(ff1) != 0:
ff2 = ff1[['pdb_id', 'chain_id', 'SEQRES', 'inside_code']] # do not drop duplicates here
ff3 = ff2.groupby(['pdb_id', 'chain_id'], as_index=False)['SEQRES'].agg(lambda x: ''.join(x))
ff3['seqres_len'] = ff3['SEQRES'].str.replace('D', '').str.len()
# Collapse the coordinates information into a single row per chain
ff4 = ff1[['pdb_id', 'chain_id', 'Coordinates', 'inside_code']] # do not drop duplicates here
ff5 = ff4.groupby(['pdb_id', 'chain_id'], as_index=False)['Coordinates'].agg(lambda x: ''.join(x))
ff5['coordinates_len'] = ff5['Coordinates'].str.replace('D', '').str.replace('?', '').str.len()
# Merge the two parts
ff6 = pd.merge(ff3, ff5, on=['pdb_id', 'chain_id'], how='left')
ff6['Coordinates'] = ff6['Coordinates'].str.replace('D', '')
full = pd.concat([f6, ff6], axis=0)
else:
full = f6
# Add the modification information to the output file
full['Modification_position'] = full.apply(lambda x: get_modification(x.Coordinates, '1'), axis=1)
full['Modification_num'] = full.apply(lambda x: get_modification(x.Coordinates, '2'), axis=1)
full['Modification_position_seqres_index'] = full.apply(lambda x: get_modification_seqres_index(x.Coordinates),axis=1)
full.to_csv(outpath + integration_file, sep='\t', index=False)
def add_modification_pdb_type_to_integraton_file(self):
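# Classify each modified position as 'start' / 'middle' / 'end' relative to the
# chain's coordinate length, then merge the chain-type table into the
# integration file.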
def modification_type(Modification_position,coordinates_len):
if pd.isnull(Modification_position):
modification_site='no_modification'
else:
modification_site=[]
modification_list = str(Modification_position).split(',')
for i in modification_list:
# Classify the modification position; i is cast to int
if int(i) <= 5:
modify='start'
elif int(i) >= int(coordinates_len)-5:
modify='end'
else:
modify='middle'
# Only record each modification-site type once
if modify not in modification_site:
modification_site.append(modify)
modification_site = ','.join(modification_site)
return modification_site
outpath = self.CONFIG['OUTPUT_FOLDER']
integration_file = MMCIF_unit.CONFIG['CHAIN_AND_SEQRES_FILE_LIST'][-1]
chain_type_file_all = MMCIF_unit.CONFIG['CHAIN_TYPE_FILE_LIST'][-1]
integration_file_new = MMCIF_unit.CONFIG['ADD_MODIFICATION_FILE']
# Add modification_type, chain_type and pdb_type information to the integration file
ff = pd.read_csv(outpath + integration_file, sep='\t')
ff['Modification_position'] = ff.apply(lambda x: x['Modification_position'].replace('[', '').replace(']', '').replace(' ', '') if isinstance(x['Modification_position'], str) else np.nan, axis=1)
ff['modification_site'] = ff.apply(lambda x: modification_type(x.Modification_position, x.coordinates_len), axis=1)
'''
pdb_type = pd.read_csv(outpath + pdb_and_sifts_protein_chain, sep='\t')
pdb_type1 = pdb_type[['pdb_id', 'pdb_type', 'pdb_protein_chain_id']].drop_duplicates()
ff1 = pd.merge(ff, pdb_type1, on=['pdb_id'], how='left')
'''
chain_type = pd.read_csv(outpath + chain_type_file_all, sep='\t', dtype=str)
chain_type.rename(columns={'chain_id_new': 'chain_id'}, inplace=True)
ff2 = pd.merge(ff, chain_type, on=['pdb_id', 'chain_id'], how='left') # ff1(before)
ff2.to_csv(outpath + integration_file_new, sep='\t', index=False)
def add_missing_coordinates_start_end(self):
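# Flag chains with residues missing from the coordinates ('?' markers),
# detect all-UNK chains ('!' SEQRES), and record the missing-residue index
# ranges per chain.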
def getmisindex(a):
str = a
word = '\\?'
b = [m.start() + 1 for m in re.finditer(word, str)]
if b != []:
return b
else:
return ''
def select_UNK(m):# m is the Seqres'content for one line
if len(set(m))==1 and '!' in list(set(m)):
return 'yes'
else:
return 'no'
def mis_or_not(a):
if '?' in a:
return 'yes'
else:
return 'no'
outpath = self.CONFIG['OUTPUT_FOLDER']
integration_file_new = MMCIF_unit.CONFIG['ADD_MODIFICATION_FILE']
all_chain_and_length, integration_new_missing_range = MMCIF_unit.CONFIG['ADD_MISSING_FILE']
coordinates_file = MMCIF_unit.CONFIG['ATOM_SEQRES_FILE_LIST'][-1]
ff = pd.read_csv(outpath + integration_file_new, sep='\t', dtype=str)
#
# This program builds a SVM model to predict a loan payment default.
# It reads a labelled dataset of loan payments, builds the model, measures its accuracy and performs unit tests.
# It ends by serializing the model; the serialized model is then used by the main program that serves it.
#
import os
import pandas as pd
import datetime
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn import svm
from sklearn import datasets
# Prepare data
iris = load_iris()
X = pd.DataFrame(iris.data)
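# --- Hedged sketch (not from the original snippet): a minimal continuation that
# follows the header's description -- train an SVM classifier, measure accuracy,
# and serialize the fitted model. The split ratio, SVC parameters and output
# filename are assumptions for illustration.
y = pd.Series(iris.target)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=42)
clf = svm.SVC(kernel='rbf', gamma='scale')
clf.fit(X_train, y_train)
print('Held-out accuracy: %.3f' % clf.score(X_test, y_test))

# Serialize the fitted model so the serving program can load it later.
import pickle
with open('svm_model.pkl', 'wb') as f:
    pickle.dump(clf, f)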
#!/usr/bin/env python
import load_data as ld
from models import vae_models
import training
import utils
import utils_train_predict as utp
import sys
import numpy as np
import pandas as pd
from collections import defaultdict
import subprocess
import argparse
def analyse_model(out_dict, loss_df, summary_function, leave_out_y, yx_oh, yx_ind, model, train_index, test_index, vocab_list, ts_len, model_type, n_out):
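# Summarise one leave-one-target-out fold: store the training loss, measure how
# well the model reconstructs the held-out library (Hamming distance), generate
# predictions by sampling/searching the latent space, and compare them against
# both the ground truth and the closest library in the training set.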
# save loss
loss_df['TargetSequence'] = leave_out_y
out_dict['loss'].append(loss_df)
print('testing reconstruction of left out data')
yx_pred_test_ind = utp.predict_and_index(model, yx_oh[test_index], batch_size=10000)
recon_hamming = utp.hamming_distance_df(yx_pred_test_ind, yx_ind[test_index], ts_len, vocab_list)
recon_hamming['DataType'] = 'Reconstruction<>Truth'
# z search predictions
print('z search prediction')
out_samples = n_out
if model_type != 'MLP':
z_train = training.model_predict(model.encoder, yx_oh[train_index], 10000)
if model_type == 'MLP':
z_found = np.repeat(np.reshape(a=yx_oh[test_index[0],:ts_len], newshape=(1,ts_len*yx_oh.shape[2])), repeats=out_samples, axis=0)
elif model_type == 'VQ_VAE':
z_found, y_onehotdist = utp.z_search(decoder=model.decoder, z_values=z_train, compare_to_oh=yx_oh[test_index[0],:ts_len], ts_len=ts_len, n_sampling=40000, out_samples=out_samples, loops=3, zoom=0.25)
# sample from from embedding space for each z_dim and decode >> chose best match
elif model_type != 'CVAE' and model_type != 'MMD_CVAE':
z_found, y_onehotdist = utp.z_search(decoder=model.decoder, z_values=z_train, compare_to_oh=yx_oh[test_index[0],:ts_len], ts_len=ts_len, n_sampling=40000, out_samples=out_samples, loops=3, zoom=0.25)
else:
z_found = utp.z_unif_sampling(z_values=z_train, n_samples=out_samples)
ts_oh = np.repeat(np.reshape(a=yx_oh[test_index[0],:ts_len], newshape=(1,ts_len*yx_oh.shape[2])), repeats=out_samples, axis=0)
z_found = np.concatenate((ts_oh,z_found),1)
yx_pred_zsearch_ind = utp.predict_and_index(model.decoder, z_found, 0)
if model_type in ['CVAE','MMD_CVAE','MLP']:
ts_ind = np.repeat(np.reshape(a=yx_ind[test_index[0],:ts_len], newshape=(1,ts_len)), repeats=out_samples, axis=0)
yx_pred_zsearch_ind = np.concatenate((ts_ind,yx_pred_zsearch_ind),1) # add ts to output, cvae only gives recombinase sequences as output
# hamming distances of predictions to truth
pred_hamming = utp.hamming_distance_uneven_df(loop_array=yx_pred_zsearch_ind, array1=yx_ind[test_index], ts_len=ts_len, vocab_list=vocab_list, ts_labels=np.array([yx_ind[test_index[0],:ts_len]]), summarise_function=summary_function)
pred_hamming['DataType'] = 'Prediction<>Truth'
# hamming distances from closest neighbor library in training set to truth
ts_hamming = utils.np_hamming_dist(yx_ind[test_index[0],:ts_len], yx_ind[train_index,:ts_len])
closest_index = np.array(train_index)[ts_hamming == np.min(ts_hamming)]
closest_hamming = utp.hamming_distance_uneven_df(loop_array=yx_ind[closest_index], array1=yx_ind[test_index], ts_len=ts_len, vocab_list=vocab_list, ts_labels=np.array([yx_ind[test_index[0],:ts_len]]), summarise_function=summary_function)
closest_hamming['DataType'] = 'Closest<>Truth'
# combine hamming of prediction and closest lib
out_dict['prediction_hamming'].append(pd.concat([recon_hamming, pred_hamming, closest_hamming]))