prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
#Script to do a grid search of gas dump mass and gas dump time
#Compares against 4 different sets of ages - linear corrected from astroNN; lowess corrected from astroNN; Sanders & Das; APOKASC
import numpy as np
import matplotlib.pyplot as plt
import math
import h5py
import json
from astropy.io import fits
from astropy.table import Table, join
import pandas as pd
import subprocess
import os
import sys
sys.path.append('./scripts/')
from chemevo import *
data_file_1 = '/data/ktfm2/apogee_data/apogee_astroNN_DR16.fits' #The astroNN VAC for APOGEE DR16
hdf5_file = '/data/ktfm2/apogee_data/gaia_spectro.hdf5' #The hdf5 file for Sanders and Das
data_file_2 = '/data/jls/apokasc_astroNN.fits' #The APOKASC data file joined with AstroNN
hdf = h5py.File(hdf5_file, "r")
dataset = hdf['data']
log_age_data = dataset["log10_age"]
ID_data = dataset["APOGEE_ID"]
SD_table = Table([ID_data, log_age_data], names=('apogee_id', 'log_age_data'))
hdu_list_1 = fits.open(data_file_1, memmap=True) #open fits file
apogee_data = Table(hdu_list_1[1].data) #Creates table from fits file
hdu_list_1.close() #Close the fits file
hdu_list_2 = fits.open(data_file_2, memmap=True) #open fits file
apokasc_data = Table(hdu_list_2[1].data) #Creates table from fits file
hdu_list_2.close() #Close the fits file
#Join tables together
full_table = join(apogee_data, SD_table)
#Define functions for the filter
def betw(x,l,u):
return (x>l)&(x<u)
def outs(x,l,u):
return (x<l)|(x>u)
#Define filter for apogee data, use guiding centre radius RL, galactic height GALZ, surface gravity LOGG
#Have 4 different filters and so on for linear age, lowess age, S&D age, APOKASC age - this extends to have disc stars
NaN_bit1 = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['age_lowess_correct']))&(~pd.isna(apogee_data['GALZ']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['MG_H_ERR']))&(~pd.isna(apogee_data['LOGG']))
fltr1 = NaN_bit1&(apogee_data['age_lowess_correct']>0.0)&(apogee_data['LOGG']<3.5)&(betw(apogee_data['GALZ'],-5.0,5.0))&(outs(apogee_data['GALZ'],-1.0,1.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
NaN_bit2 = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['age_linear_correct']))&(~pd.isna(apogee_data['GALZ']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['MG_H_ERR']))&(~pd.isna(apogee_data['LOGG']))
fltr2 = NaN_bit2&(apogee_data['age_linear_correct']>0.0)&(apogee_data['LOGG']<3.5)&(betw(apogee_data['GALZ'],-5.0,5.0))&(outs(apogee_data['GALZ'],-1.0,1.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
NaN_bit3 = (~pd.isna(full_table['rl']))&(~pd.isna(full_table['log_age_data']))&(~pd.isna(full_table['GALZ']))&(~pd.isna(full_table['FE_H']))&(~pd.isna(full_table['FE_H_ERR']))&(~pd.isna(full_table['MG_H']))&(~pd.isna(full_table['MG_H_ERR']))&(~pd.isna(full_table['LOGG']))
fltr3 = NaN_bit3&(full_table['LOGG']<3.5)&(betw(full_table['GALZ'],-5.0,5.0))&(outs(full_table['GALZ'],-1.0,1.0))&(full_table['FE_H_ERR']<0.2)&(betw(full_table['rl'],7.6,8.6))
NaN_bit4 = (~ | pd.isna(apokasc_data['rl']) | pandas.isna |
import numpy as np
import pandas as pd
import re
from itertools import chain
def import_csvs(pbp_cols, year=2018, weeks=16, encoding="ISO-8859-1"):
data = pd.DataFrame(columns=pbp_cols)
for week in range(1, weeks + 1):
df = pd.DataFrame(
pd.read_csv(f"../data/raw/pbp/{year} Week {week}.csv", encoding=encoding),
columns=pbp_cols,
)
df.fillna({"clock.minutes": 0, "clock.seconds": 0}, inplace=True)
df["week"] = week
df = df.sort_values(["id"])
df["id"] = df["id"].floordiv(1000000000)
data = data.append(df, ignore_index=True)
# return the assembled data object from the function
return data
def get_pbp(
pbp_cols=[
"id",
"week",
"offense",
"offense_conference",
"defense",
"defense_conference",
"offense_score",
"defense_score",
"drive_id",
"period",
"clock.minutes",
"clock.seconds",
"yard_line",
"down",
"distance",
"yards_gained",
"play_type",
"play_text",
],
matchup_cols=["id", "home_team", "away_team", "neutral_site", "conference_game"],
export_missing=True,
):
# Import Play-by-Play and Matchup data #
# Initialize necessary DataFrames
data = pd.DataFrame(columns=pbp_cols)
locations = | pd.DataFrame(columns=matchup_cols) | pandas.DataFrame |
from urllib.request import urlopen
from http.cookiejar import CookieJar
from io import StringIO
from app.extensions import cache
from app.api.constants import PERMIT_HOLDER_CACHE, DORMANT_WELLS_CACHE, LIABILITY_PER_WELL_CACHE, TIMEOUT_15_MINUTES, TIMEOUT_60_MINUTES, TIMEOUT_12_HOURS, TIMEOUT_1_YEAR
from flask import Flask, current_app
from threading import Thread
import requests
import urllib
import pandas as pd
import pyarrow as pa
import time
from .ogc_data_constants import PERMIT_HOLDER_CSV_DATA, DORMANT_WELLS_CSV_DATA, LIABILITY_PER_WELL_CSV_DATA
# TODO: Stick into environment variables
PERMIT_HOLDER_CSV = 'http://reports.bcogc.ca/ogc/f?p=200:201:14073940726161:CSV::::'
DORMANT_WELLS_CSV = 'https://reports.bcogc.ca/ogc/f?p=200:81:9680316354055:CSV::::'
LIABILITY_PER_WELL_CSV = 'https://reports.bcogc.ca/ogc/f?p=200:10:10256707131131:CSV::::'
session = requests.session()
def refreshOGCdata(app, cache_key, csv_url, process):
with app.app_context():
serializer = pa.default_serialization_context()
data = cache.get(cache_key)
expiry_token = cache.get(cache_key + '_EXPIRY_TOKEN')
if not expiry_token:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Cached data not found.')
# set 15 minute token to mitigate multiple threads requesting data at the same time
cache.set(cache_key + '_EXPIRY_TOKEN', True, timeout=TIMEOUT_15_MINUTES)
else:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Cached data up to date.')
return
updated_from_web = False  # ensure this is defined even if the request below fails
try:
cookieProcessor = urllib.request.HTTPCookieProcessor()
opener = urllib.request.build_opener(cookieProcessor)
response = session.get(csv_url)
df = pd.read_table(StringIO(response.text), sep=",")
df = process(df)
updated_from_web = True
current_app.logger.debug(
f'OGC DATA SERVICE - {cache_key} - Successful get from OGC reporting.')
except:
# on error, if we don't have data in the cache initialize it from static content
if not data:
current_app.logger.debug(
f'OGC DATA SERVICE - {cache_key} - Falling back to static content.')
if cache_key is PERMIT_HOLDER_CACHE:
df = pd.read_table(StringIO(PERMIT_HOLDER_CSV_DATA), sep=",")
if cache_key is DORMANT_WELLS_CACHE:
df = pd.read_table(StringIO(DORMANT_WELLS_CSV_DATA), sep=",")
if cache_key is LIABILITY_PER_WELL_CACHE:
df = pd.read_table(StringIO(LIABILITY_PER_WELL_CSV_DATA), sep=",")
df = process(df)
row_count = df.shape[0]
# only update cache if there is a good dataset
if row_count > 1:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Updating cached data.')
cache.set(
cache_key,
serializer.serialize(df).to_buffer().to_pybytes(),
timeout=TIMEOUT_1_YEAR)
if updated_from_web:
cache.set(cache_key + '_EXPIRY_TOKEN', True, timeout=TIMEOUT_60_MINUTES)
else:
current_app.logger.warning(
f'OGC DATA SERVICE - {cache_key} - FAILED TO RETRIEVE UPDATED DATA')
class OGCDataService():
@classmethod
def refreshAllData(cls):
cls.getPermitHoldersDataFrame()
cls.getDormantWellsDataFrame()
cls.getLiabilityPerWellDataFrame()
@classmethod
def getOGCdataframe(cls, cache_key, csv_url, process):
serializer = pa.default_serialization_context()
data = cache.get(cache_key)
app = current_app._get_current_object()
#if empty dataset refresh data synchronously, otherwise refresh in the background and continue
if not data:
df = refreshOGCdata(app, cache_key, csv_url, process)
else:
thread = Thread(
target=refreshOGCdata, args=(
app,
cache_key,
csv_url,
process,
))
thread.daemon = True
thread.start()
#update data and return
data = cache.get(cache_key)
if data:
df = serializer.deserialize(data)
return df
@classmethod
def getPermitHoldersDataFrame(cls):
def process(df):
df.columns = [
'operator_id', 'organization_name', 'phone_num', 'address_line_1', 'address_line_2',
'city', 'province', 'postal_code', 'country'
]
return df
return cls.getOGCdataframe(PERMIT_HOLDER_CACHE, PERMIT_HOLDER_CSV, process)
@classmethod
def getDormantWellsDataFrame(cls):
def process(df):
df.columns = [
'operator_name', 'operator_id', 'well_auth_number', 'well_name', 'dormant_status',
'current_status', 'well_dormancy_date', 'site_dormancy_date', 'site_dormancy_type',
'site_dormant_status', 'surface_location', 'field', 'abandonment_date',
'last_spud_date', 'last_rig_rels_date', 'last_completion_date',
'last_active_production_year', 'last_active_inj_display_year',
'wellsite_dormancy_declaration_date', 'multi_well'
]
df['well_dormancy_date'] = pd.to_datetime(
df['well_dormancy_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['site_dormancy_date'] = pd.to_datetime(
df['site_dormancy_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['abandonment_date'] = pd.to_datetime(
df['abandonment_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if | pd.notnull(x) | pandas.notnull |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 14:39:07 2021
This script tests for the (in)dependence between tide and skew surge
@author: acn980
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os,sys,glob
import scipy.stats as sp
import statsmodels.api as sm
sys.path.insert(0,r'E:\github\seasonality_risk\Functions')
from Functions_HCMC import get_skew_surge
#%%
save = False
fn_trunk = 'E:/surfdrive/Documents'
fn = os.path.join(fn_trunk, 'Master2019\Thomas\data\matlab_csv')
fn_files = 'Master2019/Thomas/data'
fn2 = os.path.join(fn_trunk,fn_files)
#%% We import the total water level and tide to obtain the high tide and skew surge
#We import the tide
fn_tide = os.path.join(fn,'Tide_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
date_parser = lambda x: | pd.datetime.strptime(x, "%d-%m-%Y %H:%M:%S") | pandas.datetime.strptime |
from flask import render_template, flash, redirect, url_for, request, send_file, send_from_directory
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from werkzeug.urls import url_parse
from app import app
from app.forms import LoginForm
import boto3
from flask_login import current_user, login_user, logout_user, login_required
from app.models import User
import csv
import shutil
import requests
import json
import os
import pandas as pd
import numpy as np
import exifread
s3_client = boto3.client('s3')
bucket_name = 'w210-img-upload'
s3_resource = boto3.resource('s3')
my_bucket = s3_resource.Bucket(bucket_name)
db_string = "postgres://dbmaster:dbpa$$w0rd!@w210postgres01.c8siy60gz3hg.us-east-1.rds.amazonaws.com:5432/w210results"
def df_to_geojson(df, properties, lat='Lat', lon='Long'):
geojson = {'type':'FeatureCollection', 'features':[]}
for _, row in df.iterrows():
feature = {'type':'Feature',
'properties':{},
'geometry':{'type':'Point',
'coordinates':[]}}
feature['geometry']['coordinates'] = [row[lon],row[lat]]
for prop in properties:
feature['properties'][prop] = row[prop]
geojson['features'].append(feature)
return geojson
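# Illustrative sketch (not part of the original app; the frame and column values below are made up):
# how df_to_geojson above could be called on a small results frame.
# sample_df = pd.DataFrame({'Lat': [37.77], 'Long': [-122.42], 'species': ['Coyote']})
# sample_geojson = df_to_geojson(sample_df, properties=['species'])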
def gpsParser(x):
if x == 0.0:
return 0.0
else:
degrees = int(x[1:-1].split(',')[0])
try:
minNumerator = int(x[1:-1].split(',')[1].split('/')[0])
minDenominator = int(x[1:-1].split(',')[1].split('/')[1])
except IndexError:
minNumerator = int(x[1:-1].split(',')[1].split('/')[0])
minDenominator = 1.0
try:
secNumerator = int(x[1:-1].split(',')[2].split('/')[0])
secDenominator = int(x[1:-1].split(',')[2].split('/')[1])
except IndexError:
secNumerator = int(x[1:-1].split(',')[2].split('/')[0])
secDenominator = 1.0
deciMinutes = minNumerator/minDenominator/60
deciSeconds = secNumerator/secDenominator/3600
return(np.round(degrees+deciMinutes+deciSeconds,6))
def exifExtractor(file):
image = open(file, 'rb')
tags = exifread.process_file(image)
gpsInfo = {'fileName': image.name.lower().split('/')[-1]}
for k in ['GPS GPSLatitudeRef', 'GPS GPSLatitude', 'GPS GPSLongitudeRef', 'GPS GPSLongitude']:
try:
gpsInfo[k] = str(tags[k])
except KeyError:
gpsInfo[k] = 0.0
return gpsInfo
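# Illustrative sketch (the file path is a placeholder): exifExtractor returns raw EXIF strings, which
# gpsParser converts to decimal degrees; the hemisphere sign is applied separately via the *Ref tags,
# as done in the upload() view below.
# info = exifExtractor('/tmp/example.jpg')
# lat = gpsParser(info['GPS GPSLatitude'])  # e.g. '[37, 46, 2938/100]' -> 37.774828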
def formatLabel(x):
if x == 'american_black_bear':
return 'Black bear'
elif x == 'domestic_cow':
return 'Cow'
elif x == 'domestic_dog':
return 'Dog'
elif x == 'gray_fox':
return 'Gray fox'
elif x == 'red_fox':
return 'Red fox'
elif x == 'white_tailed_deer':
return 'White-tailed deer'
elif x == 'mule_deer':
return 'Mule deer'
elif x == 'wild_turkey':
return 'Wild turkey'
elif x == 'red_deer':
return 'Elk'
else:
return x.capitalize()
def purge_local(dir):
excepts = []
prefix = current_user.username+'/'
my_bucket.objects.filter(Prefix=prefix).delete()
engine = create_engine(db_string, echo=True)
connection = engine.connect()
connection.execute("DROP TABLE IF EXISTS {}".format('test_upload'))
connection.close()
engine.dispose()
for file in os.listdir(dir):
file_path = os.path.join(dir,file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as e:
excepts.append(e)
return excepts
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', title='Home')
@app.route('/about')
def about():
return render_template('about.html', title='About Us')
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('upload'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('upload')
return redirect(next_page)
return render_template('login_page.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/upload', methods=['GET', 'POST'])
@login_required
def upload():
check_file = app.config['DOWNLOAD_FOLDER']+current_user.username+'/'+current_user.username+'_results.csv'
if os.path.isfile(check_file):
purge_local(os.path.join(app.config['DOWNLOAD_FOLDER'],current_user.username))
if request.method == 'POST':
data_files = request.files.getlist('file[]')
for data_file in data_files:
name, ext = os.path.splitext(data_file.filename)
if ext.lower() in [".jpg",".jpeg",".png"]:
filename_old = current_user.username+'/upload/'+data_file.filename
filename_new = filename_old.lower()
s3_client.upload_fileobj(data_file, bucket_name, filename_new)
print("Uploading "+data_file.filename+" to "+bucket_name+".")
else:
pass
upload_dir = '/home/ubuntu/s3bucket/'+current_user.username+'/upload/'
dfGPSRaw = pd.DataFrame()
for file in os.listdir(upload_dir):
df_tmp = pd.DataFrame.from_dict([exifExtractor(os.path.join(upload_dir,file))],orient='columns')
dfGPSRaw = dfGPSRaw.append(df_tmp)
dfGPSRaw['LatRef'] = dfGPSRaw['GPS GPSLatitudeRef'].apply(lambda x: 1 if x == 'N' else -1)
dfGPSRaw['LonRef'] = dfGPSRaw['GPS GPSLongitudeRef'].apply(lambda x: 1 if x == 'E' else -1)
dfGPSRaw['Lat'] = dfGPSRaw['GPS GPSLatitude'].apply(gpsParser)*dfGPSRaw['LatRef']
dfGPSRaw['Long'] = dfGPSRaw['GPS GPSLongitude'].apply(gpsParser)*dfGPSRaw['LonRef']
dfGPStmp = dfGPSRaw[['fileName','Lat', 'Long']]
dfGPStmp = dfGPStmp.set_index('fileName')
geotags_file = app.config['DOWNLOAD_FOLDER']+current_user.username+'/geotags.csv'
if os.path.isfile(geotags_file):
dfGPSold = pd.read_csv(geotags_file)
dfGPSnew = pd.concat([dfGPSold, dfGPStmp])
dfGPSnew.to_csv(geotags_file)
else:
dfGPStmp.to_csv(geotags_file)
return redirect(url_for('complete'))
else:
username = current_user.username
return render_template('upload.html', title='File Upload', username = username)
@app.route('/complete', methods=['GET', 'POST'])
@login_required
def complete():
if request.method == "POST":
if 'upload_again' in request.form:
return redirect(url_for('upload'))
elif 'launcher' in request.form:
return redirect(url_for('classify'))
else:
return render_template('complete.html', title='Thank You!')
@app.route('/output', methods=['GET', 'POST'])
@login_required
def output():
engine = create_engine(db_string, echo=True)
Base = declarative_base(engine)
output_file = app.config['DOWNLOAD_FOLDER']+current_user.username+'/'+current_user.username+'_results.csv'
# os.remove(output_file)
class Results(Base):
__tablename__ = 'test_upload'
# __tablename__ = 'dummy_table'
# __tablename__ = str(current_user.username + '_results')
__table_args__ = {'autoload':True}
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
qry = session.query(Results)
with open(output_file, 'w') as csvfile:
outcsv = csv.writer(csvfile, delimiter=',',quotechar='"', quoting = csv.QUOTE_MINIMAL)
header = Results.__table__.columns.keys()
outcsv.writerow(header)
for record in qry.all():
outcsv.writerow([getattr(record, c) for c in header ])
df_results = pd.read_csv(output_file)
df_resTransform = df_results.loc[df_results.groupby(['fileName'])['probability'].idxmax()]
dfGPS = pd.read_csv(app.config['DOWNLOAD_FOLDER']+current_user.username+'/geotags.csv')
df_output = | pd.merge(df_resTransform, dfGPS, on='fileName') | pandas.merge |
import time
import d2l.torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch
import torchsummary
import torchvision.io
from torchvision import datasets
from torchvision import transforms
from torch.utils import data
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import os
import kaggle
class residual(nn.Module):
def __init__(self, inputChannels, outputChannels, stride=1):
super(residual, self).__init__()
self.conv1 = nn.Conv2d(inputChannels, outputChannels, kernel_size=(3, 3), stride=stride, padding=(1, 1))
self.conv2 = nn.Conv2d(outputChannels, outputChannels, kernel_size=(3, 3), padding=(1, 1))
if stride != 1:
self.conv3 = nn.Conv2d(inputChannels, outputChannels, stride=stride, kernel_size=(1, 1))
else:
self.conv3 = None
self.bn1 = nn.BatchNorm2d(outputChannels)
self.bn2 = nn.BatchNorm2d(outputChannels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
y = self.relu(self.bn1(self.conv1(x)))
y = self.bn2(self.conv2(y))
if self.conv3:
x = self.conv3(x)
y += x
return self.relu(y)
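# Quick sanity-check sketch for the residual block above (illustrative shapes, not part of the training run):
# blk = residual(64, 128, stride=2)
# blk(torch.rand(4, 64, 32, 32)).shape  # the 1x1 conv3 path matches the halved size: torch.Size([4, 128, 16, 16])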
def initParameters(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
nn.init.xavier_uniform_(m.weight)
b1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(2, 2)),
nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=(3, 3), stride=2, padding=1))
b1.apply(initParameters)
def resnetBlock(inputChannel, outputChannel, numResidual, first=False):
blk = []
for _ in range(numResidual):
if not first and _ == 0:
blk.append(residual(inputChannel, outputChannel, 2))
else:
blk.append(residual(outputChannel, outputChannel))
return blk
b2 = nn.Sequential(*resnetBlock(64, 64, 2, first=True))
b3 = nn.Sequential(*resnetBlock(64, 128, 2))
b4 = nn.Sequential(*resnetBlock(128, 256, 2))
b5 = nn.Sequential(*resnetBlock(256, 512, 2))
b2.apply(initParameters)
b3.apply(initParameters)
b4.apply(initParameters)
b5.apply(initParameters)
net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(),
nn.Linear(512, 176), nn.Softmax(dim=0))
net.apply(initParameters)
class DataSets(torch.utils.data.Dataset):
def __init__(self, isTrain):
self.features, self.labels = getData(isTrain)
self.isTrain = isTrain
def __getitem__(self, item):
if self.isTrain:
return self.features[item].float(), self.labels[item]
else:
return self.features[item].float()
def __len__(self):
return len(self.features)
labelNum = 0
def getData(isTrain):
global labelNum
trainIdx = pd.read_csv('/beginner/classification/data/leavesClassification\\miniImages.csv')
testIdx = pd.read_csv('/data/leavesClassification/test.csv')
le = LabelEncoder()
label = torch.tensor(le.fit_transform(trainIdx['label']), dtype=torch.long)
labelNum = len(label.unique())
dataBase = 'D:\\torchProjects\\data\\leavesClassification\\'
Data = []
if isTrain:
idx = trainIdx.set_index('image')
else:
label = None
idx = testIdx.set_index('image')
for Idx, (imgName, target) in enumerate(idx.iterrows()):
if not Idx % 1000:
print(Idx)
Data.append(torchvision.io.read_image(dataBase + imgName))
return Data, label
def loadData(train=True):
Iter = data.DataLoader(DataSets(train), 64 if not train else batchSize)
return Iter
def accuracy(predict, y):
mask = torch.ones(len(y))
mask = mask[predict.max(axis=1).indices == y]
return float(sum(mask))
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
lr, numEpoch, batchSize = 0.22, 200, 64
# newNet = torchvision.models.resnet50(True)
# newNet.fc = nn.Linear(newNet.fc.in_features, labelNum)
# nn.init.xavier_uniform_(newNet.fc.weight)
def main():
# torchsummary.summary(net, (3, 64, 64), device='cpu')
# input()
trainIter = loadData(True)
# trainIter = [100, 10]
print("Data loading finished, using {}".format(device))
loss = nn.CrossEntropyLoss()
# paramList = [param for name, param in newNet.named_parameters() if name not in ['fc.weight', 'fc.bias']]
# optimizer = torch.optim.SGD([{'params': paramList}, {'params': newNet.fc.parameters(), 'lr': 0.1}], lr=0.01)
net.load_state_dict(torch.load('./savedModel11.pkl'))
# net[7] = nn.Sequential(nn.Linear(512, 256),
# nn.BatchNorm1d(256),
# nn.ReLU(),
# nn.Dropout(),
# nn.Linear(256, 176))
# net[7].apply(initParameters)
newParamsId = list(map(id, net[7].parameters()))
baseParams = filter(lambda x: id(x) not in newParamsId, net.parameters())
# net.to(device)
flag = 0
lrList = [0.002, 0.0005]
threshList = [50]
lastScore = 0
for epoch in range(numEpoch):
L = 0
accumulator = [0, 0, 0]
optimizer = torch.optim.SGD([{'params': baseParams},
{'params': net[7].parameters(), 'lr': lrList[flag] * 200}],
lr=lrList[flag])
if flag < len(threshList) and epoch > threshList[flag]:
flag += 1
optimizer = torch.optim.SGD([{'params': baseParams},
{'params': net[7].parameters(), 'lr': lrList[flag] * 200}],
lr=lrList[flag])
for X, y in trainIter:
# X, y = X.to(device), y.to(device)
optimizer.zero_grad()
predict = net(X)  # keep the raw (N, C) outputs: CrossEntropyLoss and accuracy() both expect class scores, not argmax indices
l = loss(predict, y)
l.backward()
optimizer.step()
L += l * len(y)
accumulator[0] += float(l) * len(y)
accumulator[1] += accuracy(predict, y)
accumulator[2] += len(y)
print(f'loss on train {accumulator[0] / accumulator[2]}, accu on train {accumulator[1] / accumulator[2]}')
if accumulator[1] / accumulator[2] > 0.85 and accumulator[1] / accumulator[2] > lastScore:
lastScore = accumulator[1] / accumulator[2]
torch.save(net.state_dict(), './savedModel' + str(epoch) + '.pkl')
# net.load_state_dict(torch.load('./savedModel.pkl'))
# if accumulator[0] / accumulator[2] > bestScore:
# bestScore = accumulator[0] / accumulator[2]
# torch.save(net.state_dict(), './resNet18' + str(epoch) + '.pt')
# pltX.append(epoch)
# pltY.append(accumulator[0] / accumulator[2])
# pltAccu.append(accumulator[1] / accumulator[2])
# plt.plot(pltX, pltY, 'k')
# plt.show()
# plt.pause(0.01)
def test():
net.load_state_dict(torch.load('./savedModel11.pkl', map_location='cpu'))
# net.to('cuda')
trainCSV = pd.read_csv('/data/leavesClassification/train.csv')
testIter = loadData(False)
le = LabelEncoder()
label = pd.DataFrame(le.fit_transform(trainCSV['label']))
benchmark = | pd.concat((label, trainCSV), axis=1) | pandas.concat |
"""
Module for calling the FEWS REST API.
The module contains one class and methods corresponding with the FEWS PI-REST requests:
https://publicwiki.deltares.nl/display/FEWSDOC/FEWS+PI+REST+Web+Service#FEWSPIRESTWebService-GETtimeseries
"""
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
import requests
from fewsbokeh.time import Timer
_GEODATUM_MAPPING = {"WGS 1984": "epsg:4326", "Rijks Driehoekstelsel": "epsg:28992"}
_REQUEST_PARAMETERS_ALLOWED = {
"timeseries": [
"locationIds",
"startTime",
"endTime",
"filterId",
"parameterIds",
"qualifierIds",
"documentVersion",
"thinning",
"onlyHeaders",
"showStatistics"
]
}
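# Minimal usage sketch for the Api class below (illustrative only: the url, filterId, location id and
# time window are placeholders, and `logger` is assumed to be an already configured logger object):
# api = Api("https://example.org/FewsWebServices/rest/fewspiservice/v1/", logger, filterId="example_filter")
# headers = api.get_headers(filterId="example_filter", endTime="2021-01-02T00:00:00Z")
# series = api.get_timeseries(filterId="example_filter", locationIds=["LOC001"],
#                             startTime="2021-01-01T00:00:00Z", endTime="2021-01-02T00:00:00Z")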
class Api:
"""
FEWS PI-REST API. It needs a server URL and a logger.
All variables related to PI-REST are defined in camelCase. All others are
snake_case.
"""
def __init__(self, url, logger, filterId, ssl_verify=False):
self.document_format = "PI_JSON"
self.url = url
self.parameters = None
self.locations = None
self.logger = logger
self.timer = Timer(logger)
self.ssl_verify = ssl_verify
self._get_parameters(filterId)
self._get_locations(filterId)
def _get_parameters(self, filterId):
self.parameters = self.get_parameters(filterId=filterId)
def _get_locations(self, filterId):
self.locations = self.get_locations(filterId=filterId)
def get_parameters(self, filterId):
rest_url = f"{self.url}parameters"
parameters = dict(filterId=filterId,
documentFormat=self.document_format)
self.timer.reset()
response = requests.get(rest_url, parameters, verify=self.ssl_verify)
self.timer.report("Parameters request")
print(response.url)
if response.status_code == 200:
if "timeSeriesParameters" in response.json().keys():
par_df = pd.DataFrame(response.json()["timeSeriesParameters"])
par_df.set_index("id", inplace=True)
result = par_df
else:
result = None
self.timer.report("Parameters parsed")
else:
self.logger.error(f"FEWS Server responds {response.text}")
return result
def get_filters(self, filterId=None):
"""Get filters as dictionary, or sub-filters if a filterId is specified."""
rest_url = f"{self.url}filters"
parameters = {"documentFormat": self.document_format, "filterId": filterId}
self.timer.reset()
response = requests.get(rest_url, parameters, verify=self.ssl_verify)
self.timer.report("Filters request")
if response.status_code == 200:
if "filters" in response.json().keys():
filters = {
item["id"]: {
key: value for key, value in item.items() if not key == "id"
}
for item in response.json()["filters"]
}
result = filters
else:
self.logger.warning('no filter returned')
result = None
self.timer.report("Filters parsed")
return result
def get_headers(self, filterId, endTime, parameterIds=None, locationIds=None):
"""Get timeseries headers. FilterId is required. A list of locations is optional."""
result = None
if not parameterIds:
parameterIds = self.parameters.index.to_list()
timeseries = self.get_timeseries(
filterId=filterId,
locationIds=locationIds,
startTime=endTime,
endTime=endTime,
parameterIds=parameterIds,
onlyHeaders=True,
showStatistics=True
)
if "timeSeries" in timeseries.keys():
result = [ts['header'] for ts in timeseries["timeSeries"]]
# ids = list(set([
# series["header"]["parameterId"] for series in timeseries["timeSeries"]])
# )
# timesteps = {item: [] for item in ids}
# for series in timeseries["timeSeries"]:
# parameter_id = series["header"]["parameterId"]
# timesteps[parameter_id] += [series["header"]["timeStep"]]
# timesteps = {key: list(map(dict, set(tuple(sorted(
# d.items())) for d in value))) for key, value in timesteps.items()}
# qualifiers = {item: [] for item in ids}
# for series in timeseries["timeSeries"]:
# parameter_id = series["header"]["parameterId"]
# if "qualifierId" in series["header"].keys():
# qualifiers[parameter_id] += [series["header"]["qualifierId"]]
# qualifiers = {key: [
# list(x) for x in set(tuple(x) for x in value)]
# for key, value in qualifiers.items()}
else:
self.logger.warning(
f"no timeSeries in filter {filterId} for locations {locationIds}"
)
result = None
return result
def to_parameter_names(self, parameterIds):
"""Convert parameterIds to names."""
return self.parameters.loc[parameterIds]["name"].to_list()
def to_parameter_ids(self, names):
"""Convert parameterIds to parameterIds."""
return self.parameters.loc[self.parameters["name"].isin(names)].index.to_list()
def get_locations(self,
showAttributes=False,
filterId=None,
includeLocationRelations=False):
"""Get locations and return them as a GeoDataFrame."""
rest_url = f"{self.url}locations"
parameters = dict(
documentFormat=self.document_format,
showAttributes=showAttributes,
filterId=filterId,
includeLocationRelations=includeLocationRelations
)
self.timer.reset()
response = requests.get(rest_url, parameters, verify=self.ssl_verify)
self.timer.report("Locations request")
gdf = gpd.GeoDataFrame()
if response.status_code == 200:
gdf = gpd.GeoDataFrame(response.json()["locations"])
gdf["geometry"] = gdf.apply(
(lambda x: Point(float(x["x"]), float(x["y"]))), axis=1
)
gdf.crs = _GEODATUM_MAPPING[response.json()["geoDatum"]]
gdf = gdf.to_crs("epsg:3857")
gdf["x"] = gdf["geometry"].x
gdf["y"] = gdf["geometry"].y
drop_cols = [
col
for col in gdf.columns
if col
not in [
"locationId",
"description",
"shortName",
"parentLocationId",
"relations",
"x",
"y",
"geometry",
]
]
gdf = gdf.drop(drop_cols, axis=1)
gdf.index = gdf["locationId"]
# self.locations = gdf
self.timer.report("Locations parsed")
return gdf
def get_timeseries(
self,
filterId,
locationIds=None,
startTime=None,
endTime=None,
parameterIds=None,
qualifierIds=None,
documentVersion=None,
thinning=None,
onlyHeaders=False,
unreliables=False,
showStatistics=False,
buffer=None
):
"""Get timeseries within a filter, optionally filtered by other variables."""
result = None
rest_url = f"{self.url}timeseries"
#print(f"thinning: {thinning}")
parameters = {
key: value
for key, value in locals().items()
if value and (key in _REQUEST_PARAMETERS_ALLOWED["timeseries"])
}
#apply buffer (if supplied):
if buffer:
time_delta = pd.Timestamp(endTime) - pd.Timestamp(startTime)
buffer_delta = time_delta * buffer
parameters["startTime"] = (
pd.Timestamp(startTime) - buffer_delta
).strftime("%Y-%m-%dT%H:%M:%SZ")
parameters["endTime"] = (
pd.Timestamp(endTime) + buffer_delta
).strftime("%Y-%m-%dT%H:%M:%SZ")
parameters.update({"documentFormat": self.document_format})
self.timer.reset()
#print(parameters)
response = requests.get(rest_url, parameters, verify=self.ssl_verify)
self.logger.debug(response.url)
if response.status_code == 200:
if onlyHeaders:
self.timer.report("Timeseries headers request")
result = response.json()
elif "timeSeries" in response.json().keys():
self.timer.report("TimeSeries request")
result = []
for time_series in response.json()["timeSeries"]:
ts = {}
if "header" in time_series.keys():
ts["header"] = time_series["header"]
if "events" in time_series.keys():
df = pd.DataFrame(time_series["events"])
if not unreliables:
df = df.loc[pd.to_numeric(df["flag"]) < 6]
df["datetime"] = pd.to_datetime(df["date"]) + pd.to_timedelta(
df["time"]
)
df["value"] = | pd.to_numeric(df["value"]) | pandas.to_numeric |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import os
import posixpath
import numpy as np
import pandas
import tables
import warnings
from pyiron_base import GenericParameters, Settings
from pyiron.atomistics.job.potentials import PotentialAbstract, find_potential_file_base
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = "Sep 1, 2017"
s = Settings()
class VaspPotentialAbstract(PotentialAbstract):
"""
Args:
potential_df:
default_df:
selected_atoms:
"""
def __init__(self, potential_df=None, default_df=None, selected_atoms=None):
if potential_df is None:
potential_df = self._get_potential_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp.csv"},
backward_compatibility_name="vasppotentials",
)
super(VaspPotentialAbstract, self).__init__(
potential_df=potential_df,
default_df=default_df,
selected_atoms=selected_atoms,
)
def default(self):
if self._default_df is not None:
return pandas.concat(
[
self._potential_df[
(
self._potential_df["Name"]
== self._default_df.loc[atom].values[0]
)
]
for atom in self._selected_atoms
]
)
return None
def find_default(self, element):
if isinstance(element, set):
element = element
elif isinstance(element, list):
element = set(element)
elif isinstance(element, str):
element = set([element])
else:
raise TypeError("Only, str, list and set supported!")
element_lst = list(element)
if self._default_df is not None:
merged_lst = list(set(self._selected_atoms + element_lst))
return pandas.concat(
[
self._potential_df[
(
self._potential_df["Name"]
== self._default_df.loc[atom].values[0]
)
]
for atom in merged_lst
]
)
return None
def find(self, element):
if isinstance(element, set):
element = element
elif isinstance(element, list):
element = set(element)
elif isinstance(element, str):
element = set([element])
else:
raise TypeError("Only, str, list and set supported!")
element_lst = list(element)
merged_lst = list(set(self._selected_atoms + element_lst))
return pandas.concat(
[super(VaspPotentialAbstract, self).find({atom}) for atom in merged_lst]
)
def list(self):
if len(self._selected_atoms) != 0:
return pandas.concat(
[
super(VaspPotentialAbstract, self).find({atom})
for atom in self._selected_atoms
]
)
else:
return pandas.DataFrame({})
def list_potential_names(self):
df = self.list()
if len(df) != 0:
return list(self.list()["Name"])
else:
return []
@staticmethod
def _return_potential_file(file_name):
for resource_path in s.resource_paths:
resource_path_potcar = os.path.join(
resource_path, "vasp", "potentials", file_name
)
if os.path.exists(resource_path_potcar):
return resource_path_potcar
return None
def __dir__(self):
return [val.replace("-", "_") for val in self.list_potential_names()]
def __getitem__(self, item):
item_replace = item.replace("_gga_pbe", "-gga-pbe").replace("_lda", "-lda")
if item_replace in self.list_potential_names():
df = self.list()
return self._return_potential_file(
file_name=list(df[df["Name"] == item_replace]["Filename"])[0][0]
)
selected_atoms = self._selected_atoms + [item]
return VaspPotentialAbstract(
potential_df=self._potential_df,
default_df=self._default_df,
selected_atoms=selected_atoms,
)
class VaspPotentialFile(VaspPotentialAbstract):
"""
The Potential class is derived from the PotentialAbstract class, but instead of loading the potentials from a list,
the potentials are loaded from a file.
Args:
xc (str): Exchange correlation functional ['PBE', 'LDA']
"""
def __init__(self, xc=None, selected_atoms=None):
potential_df = self._get_potential_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp.csv"},
backward_compatibility_name="vasppotentials",
)
if xc == "PBE":
default_df = self._get_potential_default_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp_pbe_default.csv"},
backward_compatibility_name="defaultvasppbe",
)
potential_df = potential_df[(potential_df["Model"] == "gga-pbe")]
elif xc == "GGA":
default_df = self._get_potential_default_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp_pbe_default.csv"},
backward_compatibility_name="defaultvasppbe",
)
potential_df = potential_df[(potential_df["Model"] == "gga-pbe")]
elif xc == "LDA":
default_df = self._get_potential_default_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp_lda_default.csv"},
backward_compatibility_name="defaultvasplda",
)
potential_df = potential_df[(potential_df["Model"] == "lda")]
else:
raise ValueError(
'The exchange correlation functional has to be set and it can either be "LDA" or "PBE"'
)
super(VaspPotentialFile, self).__init__(
potential_df=potential_df,
default_df=default_df,
selected_atoms=selected_atoms,
)
def add_new_element(self, parent_element, new_element):
"""
Adding a new user defined element with a different POTCAR file. It is assumed that the file exists
Args:
parent_element (str): Parent element
new_element (str): Name of the new element (the name of the folder where the new POTCAR file exists)
"""
ds = self.find_default(element=parent_element)
ds["Species"].values[0][0] = new_element
path_list = ds["Filename"].values[0][0].split("/")
path_list[-2] = new_element
name_list = ds["Name"].values[0].split("-")
name_list[0] = new_element
ds["Name"].values[0] = "-".join(name_list)
ds["Filename"].values[0][0] = "/".join(path_list)
self._potential_df = self._potential_df.append(ds)
if new_element not in self._default_df.index.values:
ds = | pandas.Series() | pandas.Series |
#--------------------------------------------------------
# Import Packages
#--------------------------------------------------------
from neorl.benchmarks import KP
from neorl import PPO2, DQN, ACER, ACKTR, A2C
from neorl import MlpPolicy, DQNPolicy
from neorl import RLLogger
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
#--------------------------------------------------------
# KP Data
#--------------------------------------------------------
def KP_Data(n_objects):
""""
Function provides initial data to construct a Knapsack problem environment
:param n_objects: (int) number of objects, choose either 50 or 100
:return: obj_list (list), optimum_knapsack (list), episode_length (int), weight_capacity (int)
"""
if n_objects == 50:
#---50 objects
obj_list = [[3,4],[8,4],[4,2],[9,4],[5,9],[3,6],[3,1],[9,2],[8,3],[6,8],[9,4],[4,2],[4,7],[5,1],[6,4],[5,8],[2,1],[5,7],[2,5],[7,4],\
[6,3],[8,2],[7,7],[4,8],[5,8],[2,1],[3,7],[7,4],[9,1],[1,4],[2,2],[6,4],[7,3],[2,6],[7,3],[9,1],[1,1],[1,9],[2,3],[5,8],[5,1],[3,9],\
[5,6],[5,7],[4,2],[2,3],[1,4],[8,3],[7,5],[1,6]]
#optimal solution for comparison
optimum_knapsack = [1,2,3,4,7,8,9,10,11,12,14,15,16,17,18,20,21,22,23,25,26,28,29,31,32,33,35,36,39,41,43,44,45,48,49]
#episode length
episode_length = 2
weight_capacity = 125
elif n_objects == 100:
#---100 objects
obj_list = [[1,4],[9,5],[9,7],[6,8],[3,7],[8,4],[8,6],[2,1],[2,6],[9,7],[8,2],[6,6],[6,9],[6,7],[4,4],[7,8],[1,9],[1,3],[5,3],[8,1],\
[5,7],[8,6],[2,8],[3,5],[3,8],[4,3],[8,2],[6,7],[4,9],[3,5],[9,1],[9,3],[5,6],[2,2],[2,1],[5,9],[6,2],[1,3],[8,3],[8,8],[3,8],[4,6],\
[4,7],[9,7],[9,4],[8,8],[2,7],[4,4],[1,2],[3,4],[8,8],[6,9],[4,7],[6,8],[8,7],[4,8],[7,9],[5,9],[8,8],[5,4],[2,2],[4,9],[1,4],[1,8],\
[8,6],[4,5],[9,1],[3,1],[6,2],[7,1],[1,6],[1,7],[9,7],[7,5],[7,1],[5,6],[3,5],[8,8],[8,9],[2,9],[3,1],[5,9],[7,8],[4,3],[2,8],[8,4],\
[9,5],[6,7],[8,2],[3,5],[2,6],[3,2],[9,7],[1,1],[6,7],[7,4],[6,4],[7,6],[6,4],[3,2]]
#optimal solution for comparison
optimum_knapsack = [2,3,6,7,8,10,11,12,14,15,16,19,20,22,26,27,28,31,32,34,35,37,39,40,44,45,46,48,51,55,59,60,61,65,66,67,68,69,\
70,73,74,75,78,79,81,83,84,86,87,89,92,93,94,96,97,98,99,100]
#episode length
episode_length = 2
weight_capacity= 250
else:
raise ValueError('--error: n_objects is not defined, either choose 50 or 100')
return obj_list, optimum_knapsack, episode_length, weight_capacity
#--------------------------------------------------------
# User Parameters for RL Optimisation
#--------------------------------------------------------
try:
total_steps=int(sys.argv[1]) #get time steps as external argument (for quick testing)
except:
total_steps=8000 #or use default total time steps to run all optimizers
n_steps=12 #update frequency for A2C, ACKTR, PPO
n_objects=50 #number of objects: choose 50 or 100
n_sum_steps=10 #this is for logging and averaging purposes
#---get some data to initialize the environment---
obj_list, optimum_knapsack, episode_length, weight_capacity=KP_Data(n_objects=n_objects)
#--------------------------------------------------------
# DQN
#--------------------------------------------------------
#create an environment object from the class
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'dqn')
#create a callback function to log data
cb_dqn=RLLogger(check_freq=1)
#To activate logger plotter, add following arguments to cb_dqn:
#plot_freq = 50,n_avg_steps=10,pngname='DQN-reward'
#Also applicable to ACER.
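#For example, a sketch assembled from the note above (argument names as listed there):
#cb_dqn=RLLogger(check_freq=1, plot_freq=50, n_avg_steps=10, pngname='DQN-reward')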
#create a RL object based on the env object
dqn = DQN(DQNPolicy, env=env, seed=1)
#optimise the environment class
dqn.learn(total_timesteps=total_steps*n_sum_steps, callback=cb_dqn)
#--------------------------------------------------------
# ACER
#--------------------------------------------------------
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'acer')
cb_acer=RLLogger(check_freq=1)
acer = ACER(MlpPolicy, env=env, seed=1)
acer.learn(total_timesteps=total_steps*n_sum_steps, callback=cb_acer)
#--------------------------------------------------------
# PPO
#--------------------------------------------------------
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'ppo')
cb_ppo=RLLogger(check_freq=1)
#To activate logger plotter, add following arguments to cb_ppo:
#plot_freq = 1, n_avg_steps=10, pngname='PPO-reward'
#Also applicable to A2C, ACKTR.
ppo = PPO2(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
ppo.learn(total_timesteps=total_steps, callback=cb_ppo)
#--------------------------------------------------------
# ACKTR
#--------------------------------------------------------
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'acktr')
cb_acktr=RLLogger(check_freq=1)
acktr = ACKTR(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
acktr.learn(total_timesteps=total_steps, callback=cb_acktr)
#--------------------------------------------------------
# A2C
#--------------------------------------------------------
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'a2c')
cb_a2c=RLLogger(check_freq=1)
a2c = A2C(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
a2c.learn(total_timesteps=total_steps, callback=cb_a2c)
#--------------------------------
#Summary Results
#--------------------------------
print('--------------- DQN results ---------------')
print('The best value of x found:', cb_dqn.xbest)
print('The best value of y found:', cb_dqn.rbest)
print('--------------- ACER results ---------------')
print('The best value of x found:', cb_acer.xbest)
print('The best value of y found:', cb_acer.rbest)
print('--------------- PPO results ---------------')
print('The best value of x found:', cb_ppo.xbest)
print('The best value of y found:', cb_ppo.rbest)
print('--------------- ACKTR results ---------------')
print('The best value of x found:', cb_acktr.xbest)
print('The best value of y found:', cb_acktr.rbest)
print('--------------- A2C results ---------------')
print('The best value of x found:', cb_a2c.xbest)
print('The best value of y found:', cb_a2c.rbest)
#--------------------------------
#Summary Plots
#--------------------------------
log_dqn = pd.DataFrame(cb_dqn.r_hist).cummax(axis = 0).values
log_acer = pd.DataFrame(cb_acer.r_hist).cummax(axis = 0).values
log_ppo = pd.DataFrame(cb_ppo.r_hist).cummax(axis = 0).values
log_acktr = | pd.DataFrame(cb_acktr.r_hist) | pandas.DataFrame |
"""Module for data preprocessing.
You can consolidate data with `data_consolidation` and optimize it, for example, for machine learning models.
Then you can preprocess the data to be able to achieve even better results.
There are many small functions that you can use separately, but there is a main function `preprocess_data` that
calls all of them for you, based on the input params. For inverse preprocessing use `preprocess_data_inverse`.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Generic, Any, cast, Union
from dataclasses import dataclass, astuple
import warnings
import importlib.util
from typing_extensions import Literal
import numpy as np
import pandas as pd
import mylogging
from .custom_types import DataFrameOrArrayGeneric
if TYPE_CHECKING:
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
ScalerType = Union[MinMaxScaler, RobustScaler, StandardScaler]
# Lazy load
# import scipy.signal
# import scipy.stats
# from sklearn import preprocessing
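# Minimal workflow sketch tying the functions below together (illustrative only; `raw_df`, the column name
# and `model_output` are placeholders, not part of this module):
# consolidated = data_consolidation(raw_df, predicted_column="Temperature", remove_nans_or_replace="interpolate")
# preprocessed, last_undiff_value, final_scaler = preprocess_data(
#     consolidated, standardizeit="standardize", data_transform="difference")
# real_scale = preprocess_data_inverse(
#     model_output, standardizeit="standardize", final_scaler=final_scaler,
#     data_transform="difference", last_undiff_value=last_undiff_value)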
def data_consolidation(
data: pd.DataFrame | np.ndarray,
predicted_column: int | str = None,
other_columns: int = 1,
datalength: int = 0,
datetime_column: str | int | None = "",
resample_freq: str | None = None,
resample_function: Literal[None, "sum", "mean"] = "sum",
embedding: Literal[None, "label", "one-hot"] = "label",
unique_threshold: float = 0.6,
remove_nans_threshold: float = 0.85,
remove_nans_or_replace: str | float = "interpolate",
dtype: str | np.dtype | pd.DataFrame | list = "float32",
) -> pd.DataFrame:
"""Transform input data in various formats and shapes into data in defined shape optimal for machine learning models, that other functions rely on.
If you have data in other format than dataframe, use `load_data` first.
Note:
This function returns only numeric data. All string columns will be removed (use embedding if you need them).
The predicted column is moved to index 0 !!!
Args:
data (pd.DataFrame | np.ndarray): Input data in well standardized format.
predicted_column (int | str, optional): Predicted column name or index. It is moved to the first column and checked to be numeric.
If None, it's ignored. Defaults to None.
other_columns (int, optional): Whether to use other columns or only the predicted one. Defaults to 1.
datalength (int, optional): Data length after resampling. Defaults to 0.
datetime_column (str | int | None, optional): Name or index of datetime column. Defaults to None.
resample_freq (str | None, optional): Frequency of resampled data. Defaults to None.
resample_function (Literal[None, 'sum', 'mean'], optional): 'sum' or 'mean'. Whether to sum resampled columns or average them. Defaults to 'sum'.
embedding(Literal[None, "label", "one-hot"], optional): 'label' or 'one-hot'. Categorical encoding. Creates numbers from strings. 'label' gives each
category (unique string) a concrete number, so the result keeps the same number of columns. 'one-hot' creates a new
column for every category. Only columns where strings repeat often enough (see unique_threshold) will be used. Defaults to 'label'.
unique_threshold(float, optional): Remove string columns that have too many categories. E.g. 0.9 means a column is
deleted unless at least 90% of its values are non-unique (repeating). Min is 0, max is 1. It will remove ids,
hashes etc. Defaults to 0.6.
remove_nans_threshold (float, optional): From 0 to 1. Requires that fraction of non-nan numeric values for a column to not be deleted.
E.g. a value of 0.9 for a column with 10 values means 90% must be numeric, which implies at most 1 np.nan can be present,
otherwise the column will be deleted. Defaults to 0.85.
remove_nans_or_replace (str | float, optional): 'interpolate', 'remove', 'neighbor', 'mean' or a value. Remove or replace
the remaining nan values. If you want to keep nan, set the value to np.nan. If you want to use a concrete value, use a float or
int. Defaults to 'interpolate'.
dtype (str | np.dtype | pd.DataFrame | list, optional): Output dtype. For possible inputs check pandas function `astype`. Defaults to 'float32'.
Raises:
KeyError, TypeError: May happen if wrong params. E.g. if predicted column name not found in dataframe.
Returns:
np.ndarray, pd.DataFrame, str: Data in standardized form. Data array for prediction - predicted column on index 0,
and column for plotting as pandas dataframe. Data has the same type as input.
"""
if not isinstance(data, pd.DataFrame):
try:
data = pd.DataFrame(data)
except Exception as err:
raise (
RuntimeError(
mylogging.return_str(
"Check configuration file for supported formats. It can be path of file (csv, json, parquet...) or it "
"can be data in python format (numpy array, pandas dataframe or series, dict or list, ). It can also be other "
"format, but then it have to work with pd.DataFrame(your_data)."
f"\n\n Detailed error: \n\n {err}",
caption="Data load failed",
)
)
)
data_for_predictions_df = data.copy()
if data_for_predictions_df.shape[0] < data_for_predictions_df.shape[1]:
mylogging.info(
"Input data must be in shape (n_samples, n_features) that means (rows, columns) Your shape is "
f" {data.shape}. It's unusual to have more features than samples. Probably wrong shape.",
caption="Data transposed warning!!!",
)
data_for_predictions_df = data_for_predictions_df.T
if predicted_column or predicted_column == 0:
if isinstance(predicted_column, str):
predicted_column_name = predicted_column
if predicted_column_name not in data_for_predictions_df.columns:
raise KeyError(
mylogging.return_str(
f"Predicted column name - '{predicted_column}' not found in data. Change 'predicted_column' in config"
f". Available columns: {list(data_for_predictions_df.columns)}",
caption="Column not found error",
)
)
elif isinstance(predicted_column, int) and isinstance(
data_for_predictions_df.columns[predicted_column], str
):
predicted_column_name = data_for_predictions_df.columns[predicted_column]
else:
predicted_column_name = "Predicted column"
data_for_predictions_df.rename(
columns={data_for_predictions_df.columns[predicted_column]: predicted_column_name},
inplace=True,
)
# Make predicted column index 0
data_for_predictions_df.insert(
0, predicted_column_name, data_for_predictions_df.pop(predicted_column_name)
)
else:
predicted_column_name = None
reset_index = False
if datetime_column not in [None, False, ""]:
try:
if isinstance(datetime_column, str):
data_for_predictions_df.set_index(datetime_column, drop=True, inplace=True)
else:
data_for_predictions_df.set_index(
data_for_predictions_df.columns[datetime_column], drop=True, inplace=True,
)
data_for_predictions_df.index = pd.to_datetime(data_for_predictions_df.index)
except Exception:
raise KeyError(
mylogging.return_str(
f"Datetime name / index from config - '{datetime_column}' not found in data or not datetime format. "
f"Change in config - 'datetime_column'. Available columns: {list(data_for_predictions_df.columns)}"
)
)
# Convert strings numbers (e.g. '6') to numbers
data_for_predictions_df = data_for_predictions_df.apply(pd.to_numeric, errors="ignore")
if embedding:
data_for_predictions_df = categorical_embedding(
data_for_predictions_df, embedding=embedding, unique_threshold=unique_threshold,
)
# Keep only numeric columns
data_for_predictions_df = data_for_predictions_df.select_dtypes(include="number")
if predicted_column_name:
# TODO [predictit] setup other columns in define input so every model can choose and simplier config input types
if not other_columns:
data_for_predictions_df = pd.DataFrame(data_for_predictions_df[predicted_column_name])
if predicted_column_name not in data_for_predictions_df.columns:
raise KeyError(
mylogging.return_str(
"Predicted column is not number datatype. Setup correct 'predicted_column' in py. "
f"Available columns with number datatype: {list(data_for_predictions_df.columns)}",
caption="Prediction available only on number datatype column.",
)
)
if datetime_column not in [None, False, ""]:
if resample_freq:
data_for_predictions_df.sort_index(inplace=True)
if resample_function == "mean":
data_for_predictions_df = data_for_predictions_df.resample(resample_freq).mean()
elif resample_function == "sum":
data_for_predictions_df = data_for_predictions_df.resample(resample_freq).sum()
data_for_predictions_df = data_for_predictions_df.asfreq(resample_freq, fill_value=0)
else:
data_for_predictions_df.index.freq = pd.infer_freq(data_for_predictions_df.index)
if data_for_predictions_df.index.freq is None:
reset_index = True
mylogging.warn(
"Datetime index was provided from config, but frequency guess failed. "
"Specify 'resample_freq' in config to resample and have equal sampling if you want "
"to have date in plot or if you want to have equal sampling. Otherwise index will "
"be reset because cannot generate date indexes of predicted values.",
caption="Datetime frequency not inferred",
)
# If frequency is not configured nor infered or index is not datetime, it's reset to be able to generate next results
if reset_index or not isinstance(
data_for_predictions_df.index,
(pd.core.indexes.datetimes.DatetimeIndex, pd._libs.tslibs.timestamps.Timestamp),
):
data_for_predictions_df.reset_index(inplace=True, drop=True)
# Define concrete dtypes in number columns
if dtype:
data_for_predictions_df = data_for_predictions_df.astype(dtype, copy=False)
# Trim the data on defined length
data_for_predictions_df = data_for_predictions_df.iloc[-datalength:, :]
data_for_predictions_df = pd.DataFrame(data_for_predictions_df)
# Remove columns that have to much nan values
if remove_nans_threshold:
data_for_predictions_df = data_for_predictions_df.iloc[:, 0:1].join(
data_for_predictions_df.iloc[:, 1:].dropna(
axis=1, thresh=len(data_for_predictions_df) * (remove_nans_threshold)
)
)
# Replace rest of nan values
if remove_nans_or_replace == "interpolate":
data_for_predictions_df.interpolate(inplace=True)
elif remove_nans_or_replace == "remove":
data_for_predictions_df.dropna(axis=0, inplace=True)
elif remove_nans_or_replace == "neighbor":
# Need to use both directions if first or last value is nan
data_for_predictions_df.fillna(method="ffill", inplace=True)
elif remove_nans_or_replace == "mean":
for col in data_for_predictions_df.columns:
data_for_predictions_df[col] = data_for_predictions_df[col].fillna(
data_for_predictions_df[col].mean()
)
if isinstance(remove_nans_or_replace, (int, float)):  # covers np.nan too, since nan is a float
data_for_predictions_df.fillna(remove_nans_or_replace, inplace=True)
# Forward fill and interpolate can miss some nans if on first row
else:
data_for_predictions_df.fillna(method="bfill", inplace=True)
return data_for_predictions_df
@dataclass
class PreprocessedData(Generic[DataFrameOrArrayGeneric]):
"""To be able to do inverse preprocessing (function preprocess_data_inverse), there are some values
going with preprocessed data.
Attributes:
preprocessed (DataFrameOrArrayGeneric): Preprocessed data.
last_undiff_value (Any): Last value from predicted column. This is necessary for inverse diff transformation.
final_scaler("ScalerType" | None): For inverse standardization.
"""
__slots__ = ("preprocessed", "last_undiff_value", "final_scaler")
preprocessed: DataFrameOrArrayGeneric
last_undiff_value: Any
final_scaler: "ScalerType" | None
def __iter__(self):
yield from astuple(self)
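# Because of __iter__ above, the returned dataclass unpacks like a tuple (illustrative; `df` is a placeholder):
# preprocessed, last_undiff_value, final_scaler = preprocess_data(df)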
def preprocess_data(
data: DataFrameOrArrayGeneric,
remove_outliers: int | float | None = False,
smoothit: None | tuple[int, int] = None,
correlation_threshold: float | int | None = None,
data_transform: Literal["difference", None] = None,
standardizeit: Literal[None, "standardize", "01", "-11", "robust"] = "standardize",
bins: None | int = False,
binning_type: Literal["cut", "qcut"] = "cut",
) -> PreprocessedData[DataFrameOrArrayGeneric]:
"""Main preprocessing function, that call other functions based on configuration. Mostly for preparing
data to be optimal as input into machine learning models.
Args:
data (DataFrameOrArrayGeneric): Input data that we want to preprocess.
remove_outliers (int | float | None, optional): Whether to remove unusual values far from the average. Defaults to False.
smoothit (None | tuple[int, int], optional): Whether to smooth the data with a Savitzky-Golay filter.
Insert a tuple with (window, polynom_order) parameters as in the `smooth` function, e.g. (11, 2). Defaults to False.
correlation_threshold (float | int | None, optional): Whether to remove columns that are correlated less than the configured value.
The value must be between 0 and 1, but if 0, no correlation threshold is applied. Defaults to 0.
data_transform (Literal["difference", None], optional): Whether to transform data. 'difference' transforms data into differences between
neighboring values. Defaults to None.
standardizeit (Literal[None, "standardize", "-11", "01", "robust"], optional): How to standardize data. '01' and '-11' means scope from to for normalization.
'robust' use RobustScaler and 'standard' use StandardScaler - mean is 0 and std is 1. If no standardization, use None.
Defaults to 'standardize'.
bins (None | int, optional): Whether to discretize values into a defined number of bins (their average). None means no discretization,
an int defines the number of bins. Defaults to False.
binning_type (str, optional): "cut" for equal size of bins intervals (different number of members in bins)
or "qcut" for equal number of members in bins and various size of bins. It uses pandas cut
or qcut function. Defaults to 'cut'.
Returns:
PreprocessedData: If input in numpy array, then also output in array, if dataframe input, then dataframe output.
"""
preprocessed = data.copy()
if remove_outliers:
preprocessed = remove_the_outliers(preprocessed, threshold=remove_outliers)
if smoothit:
preprocessed = smooth(preprocessed, smoothit[0], smoothit[1])
if correlation_threshold:
preprocessed = keep_corelated_data(preprocessed, threshold=correlation_threshold)
if data_transform == "difference":
if isinstance(preprocessed, np.ndarray):
last_undiff_value = preprocessed[-1, 0]
else:
last_undiff_value = preprocessed.iloc[-1, 0]
preprocessed = do_difference(preprocessed)
else:
last_undiff_value = None
if standardizeit:
preprocessed, final_scaler = standardize(preprocessed, used_scaler=standardizeit)
else:
final_scaler = None
if bins:
preprocessed = binning(preprocessed, bins, binning_type)
preprocessed = cast(DataFrameOrArrayGeneric, preprocessed)
return PreprocessedData[DataFrameOrArrayGeneric](preprocessed, last_undiff_value, final_scaler)
def preprocess_data_inverse(
data: np.ndarray,
standardizeit: str | None = None,
final_scaler: "ScalerType" | None = None,
data_transform: Literal["difference", None] = None,
last_undiff_value: None | Any = None,
) -> np.ndarray:
"""Undo all data preprocessing to get real data. Not not inverse all the columns, but only predicted one.
Only predicted column is also returned. Order is reverse than preprocessing. Output is in numpy array.
Args:
data (np.ndarray): One dimension (one column) preprocessed data. Do not use ndim > 1.
standardizeit (str | None, optional): Whether to use inverse standardization and which one. Choices [None, 'standardize', '-11', '01', 'robust']. Defaults to None.
final_scaler ("ScalerType" | None, optional): Scaler used in standardization. Defaults to None.
data_transform (Literal["difference", None], optional): Which data transformation to invert. Choices [None, 'difference']. Defaults to None.
last_undiff_value (None | Any, optional): Last used value in the difference transform. Defaults to None.
Returns:
np.ndarray: Inverse preprocessed data
Raises:
TypeError: If variable necessary for particular inverse conversion is not defined.
"""
if standardizeit:
if final_scaler:
if TYPE_CHECKING:
final_scaler = cast(ScalerType, final_scaler)
data = final_scaler.inverse_transform(data.reshape(1, -1)).ravel()
else:
raise TypeError(
mylogging.return_str(
"If using `standardizeit`, then you need to define `final_scaler` for inverse transform."
)
)
if data_transform == "difference":
if last_undiff_value is not None:
last_undiff_value = cast(float, last_undiff_value)
data = inverse_difference(data, last_undiff_value)
else:
raise TypeError(
mylogging.return_str(
"If using `data_transform == 'difference'`, then you need to define `last_undiff_value` for inverse transform."
)
)
return data
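# A minimal usage sketch added for illustration (not part of the original module). It assumes a
# numeric DataFrame whose first column is the predicted one and shows how preprocess_data and
# preprocess_data_inverse pair together; the helper name is hypothetical.
def _example_preprocess_roundtrip(df: pd.DataFrame) -> np.ndarray:
    """Preprocess data and then invert the predicted column back to its original scale."""
    processed, last_undiff_value, final_scaler = preprocess_data(
        df, data_transform="difference", standardizeit="standardize"
    )
    predicted_column = (
        processed.values[:, 0] if isinstance(processed, pd.DataFrame) else processed[:, 0]
    )
    # Inverse operations run in reverse order: inverse standardization first, then inverse difference.
    return preprocess_data_inverse(
        predicted_column,
        standardizeit="standardize",
        final_scaler=final_scaler,
        data_transform="difference",
        last_undiff_value=last_undiff_value,
    )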
def categorical_embedding(
data: pd.DataFrame, embedding: Literal["label", "one-hot"] = "label", unique_threshold: float = 0.6
) -> pd.DataFrame:
"""Transform string categories such as 'US', 'FR' into numeric values, that can be used in machine learning model.
Args:
data (pd.DataFrame): Data with string (pandas Object dtype) columns.
embedding("label", "one-hot", optional): 'label' or 'one-hot'. Categorical encoding. Create numbers from strings. 'label'
give each category (unique string) concrete number. Result will have same number of columns.
'one-hot' create for every category new column. Only columns, where are strings repeating (unique_threshold)
will be used. Defaults to "label".
unique_threshold(float, optional): Remove string columns, that have to many categories (ids, hashes etc.).
E.g 0.9 defines that in column of length 100, max number of categories to not to be deleted is
10 (90% non unique repeating values). Defaults to 0.6. Min is 0, max is 1. Defaults is 0.6.
Returns:
pd.DataFrame: Dataframe where string columns transformed to numeric.
"""
data_for_embedding = data.copy()
to_drop = []
for i in data_for_embedding.select_dtypes(exclude=["number"]):
try:
if (data_for_embedding[i].nunique() / len(data_for_embedding[i])) > (1 - unique_threshold):
to_drop.append(i)
continue
data_for_embedding[i] = data_for_embedding[i].astype("category", copy=False)
if embedding == "label":
data_for_embedding[i] = data_for_embedding[i].cat.codes
if embedding == "one-hot":
data_for_embedding = data_for_embedding.join(pd.get_dummies(data_for_embedding[i]))
to_drop.append(i)
except Exception:
to_drop.append(i)
# Drop unusable string columns (too many unique values, errors, or one-hot source columns) - drop all columns at once for better performance
data_for_embedding.drop(to_drop, axis=1, inplace=True)
return data_for_embedding
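# A short illustrative sketch (added, not from the original module); the column names and values are
# made up. It shows the difference between 'label' encoding (same column count) and 'one-hot'
# encoding (a new 0/1 column per category) on a toy DataFrame.
def _example_categorical_embedding() -> tuple[pd.DataFrame, pd.DataFrame]:
    toy = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6], "country": ["US", "FR", "US", "FR", "US", "FR"]})
    labeled = categorical_embedding(toy, embedding="label")  # 'country' becomes integer category codes
    one_hot = categorical_embedding(toy, embedding="one-hot")  # 'US' and 'FR' become new indicator columns
    return labeled, one_hot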
### Data preprocessing functions...
def keep_corelated_data(data: DataFrameOrArrayGeneric, threshold: float = 0.5) -> DataFrameOrArrayGeneric:
"""Remove columns that are not corelated enough to predicted columns. Predicted column is supposed to be 0.
Args:
data (DataFrameOrArrayGeneric): Time series data.
threshold (float, optional): After correlation matrix is evaluated, all columns that are correlated less
than threshold are deleted. Defaults to 0.5.
Returns:
DataFrameOrArrayGeneric: Data with no columns that are not corelated with predicted column.
If input in numpy array, then also output in array, if dataframe input, then dataframe output.
"""
if data.ndim == 1 or data.shape[1] == 1:
return data
if isinstance(data, np.ndarray):
# If some column has no variance - RuntimeWarning in the correlation matrix computation and then in the comparison
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
corr = np.corrcoef(data.T)
corr = np.nan_to_num(corr, 0)
range_array = np.array(range(corr.shape[0]))
columns_to_del = range_array[abs(corr[0]) <= threshold]
data = np.delete(data, columns_to_del, axis=1)
elif isinstance(data, pd.DataFrame):
corr = data.corr().iloc[0, :]
corr = corr[~corr.isnull()]
names_to_del = list(corr[abs(corr) <= threshold].index)
data.drop(columns=names_to_del, inplace=True)
return data
def remove_the_outliers(
data: DataFrameOrArrayGeneric, threshold: int | float = 3, main_column: int | str = 0
) -> DataFrameOrArrayGeneric:
"""Remove values far from mean - probably errors. If more columns, then only rows that have outlier on
predicted column will be deleted. Predicted column is supposed to be 0.
Args:
data (DataFrameOrArrayGeneric): Time series data. Must have ndim = 2, if univariate, reshape...
threshold (int, optional): How many times must be standard deviation from mean to be ignored. Defaults to 3.
main_column (int | str, optional): Main column that we relate outliers to. Defaults to 0.
Returns:
DataFrameOrArrayGeneric: Cleaned data.
Examples:
>>> data = np.array([[1, 3, 5, 2, 3, 4, 5, 66, 3]])
>>> print(remove_the_outliers(data))
[[ 1 3 5 2 3 4 5 66 3]]
"""
if isinstance(data, np.ndarray):
data_mean = data[:, main_column].mean()
data_std = data[:, main_column].std()
range_array = np.array(range(data.shape[0]))
names_to_del = range_array[abs(data[:, main_column] - data_mean) > threshold * data_std]
data = np.delete(data, names_to_del, axis=0)
elif isinstance(data, pd.DataFrame):
if isinstance(main_column, int):
main_column = data.columns[main_column]
data_mean = data[main_column].mean()
data_std = data[main_column].std()
data = data[abs(data[main_column] - data_mean) < threshold * data_std]
return data
def do_difference(data: DataFrameOrArrayGeneric) -> DataFrameOrArrayGeneric:
"""Transform data into neighbor difference. For example from [1, 2, 4] into [1, 2].
Args:
data (DataFrameOrArrayGeneric): Data.
Returns:
DataFrameOrArrayGeneric: Differenced data in same format as inserted.
Examples:
>>> data = np.array([1, 3, 5, 2])
>>> print(do_difference(data))
[ 2 2 -3]
"""
if isinstance(data, np.ndarray):
return np.diff(data, axis=0)
elif isinstance(data, (pd.DataFrame, pd.Series)):
return data.diff().iloc[1:]
else:
raise TypeError(mylogging.return_str("Only DataFrame, Series or numpy array supported."))
def inverse_difference(differenced_predictions: np.ndarray, last_undiff_value: float) -> np.ndarray:
"""Transform do_difference transform back.
Args:
differenced_predictions (np.ndarray): One dimensional!! differenced data from do_difference function.
last_undiff_value (float): First value to computer the rest.
Returns:
np.ndarray: Normal data, not the additive series.
Examples:
>>> data = np.array([1, 1, 1, 1])
>>> print(inverse_difference(data, 1))
[2 3 4 5]
"""
assert differenced_predictions.ndim == 1, "Data input must be one-dimensional."
return np.insert(differenced_predictions, 0, last_undiff_value).cumsum()[1:]
def standardize(
data: DataFrameOrArrayGeneric, used_scaler: Literal["standardize", "01", "-11", "robust"] = "standardize"
) -> tuple[DataFrameOrArrayGeneric, "ScalerType"]:
"""Standardize or normalize data. More standardize methods available. Predicted column is supposed to be 0.
Args:
data (DataFrameOrArrayGeneric): Time series data.
used_scaler (Literal['standardize', '01', '-11', 'robust'], optional): '01' and '-11' define the normalization range.
'robust' uses RobustScaler and 'standardize' uses StandardScaler - mean is 0 and std is 1. Defaults to 'standardize'.
Returns:
tuple[DataFrameOrArrayGeneric, ScalerType]: Standardized data and scaler for inverse transformation.
"""
if not importlib.util.find_spec("sklearn"):
raise ImportError(
"sklearn library is necessary for standardize function. Install via `pip install sklearn`"
)
from sklearn import preprocessing
if used_scaler == "01":
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
elif used_scaler == "-11":
scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
elif used_scaler == "robust":
scaler = preprocessing.RobustScaler()
elif used_scaler == "standardize":
scaler = preprocessing.StandardScaler()
else:
raise TypeError(
mylogging.return_str(
f"Your scaler {used_scaler} not in options. Use one of ['01', '-11', 'robust', 'standardize']"
)
)
# First the normalized values are calculated, then a scaler just for the predicted column is fitted again so no full matrix is necessary for the inverse
if isinstance(data, pd.DataFrame):
normalized = data.copy()
normalized.iloc[:, :] = scaler.fit_transform(data.copy().values)
final_scaler = scaler.fit(data.values[:, 0].reshape(-1, 1))
else:
normalized = scaler.fit_transform(data)
final_scaler = scaler.fit(data[:, 0].reshape(-1, 1))
return normalized, final_scaler
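# A small illustrative sketch (added, not from the original module) showing that final_scaler can
# invert the first (predicted) column back to its original scale; the array values are made up.
def _example_standardize_inverse() -> np.ndarray:
    arr = np.array([[1.0, 100.0], [2.0, 200.0], [3.0, 300.0]])
    normalized, final_scaler = standardize(arr, used_scaler="standardize")
    # final_scaler was fitted on the first column only, so it inverts just that column.
    return final_scaler.inverse_transform(normalized[:, 0].reshape(-1, 1)).ravel()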
def standardize_one_way(
data: DataFrameOrArrayGeneric, min: float, max: float, axis: Literal[0, 1] = 0, inplace: bool = False,
) -> DataFrameOrArrayGeneric:
"""Own implementation of standardization. No inverse transformation available.
Reason is for builded applications to do not carry sklearn with build.
Args:
data (DataFrameOrArrayGeneric): Data.
min (float): Minimum in transformed axis.
max (float): Max in transformed axis.
axis (Literal[0, 1], optional): 0 to columns, 1 to rows. Defaults to 0.
inplace (bool, optional): If true, no copy will be returned, but original object. Defaults to False.
Returns:
DataFrameOrArrayGeneric: Standardized data. If the input is a numpy array, the output is also an array;
if the input is a DataFrame, the output is a DataFrame.
"""
if not inplace:
data = data.copy()
values = data.values if isinstance(data, pd.DataFrame) else data
if axis == 0:
values[:, :] = (values - np.nanmin(values, axis=0)) / (
np.nanmax(values, axis=0) - np.nanmin(values, axis=0)
) * (max - min) + min
elif axis == 1:
values[:, :] = (
(values.T - np.nanmin(values.T, axis=0))
/ (np.nanmax(values.T, axis=0) - np.nanmin(values.T, axis=0))
* (max - min)
+ min
).T
return data
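# A brief illustrative sketch (added, not from the original module) of column-wise min-max scaling
# without sklearn; the array values are made up.
def _example_standardize_one_way() -> np.ndarray:
    arr = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    # Each column is rescaled independently into the [0, 1] range.
    return standardize_one_way(arr, min=0.0, max=1.0, axis=0)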
def binning(
data: DataFrameOrArrayGeneric, bins: int, binning_type: Literal["cut", "qcut"] = "cut"
) -> DataFrameOrArrayGeneric:
"""Discretize value on defined number of bins. It will return the same shape of data, where middle
(average) values of bins interval returned.
Args:
data (DataFrameOrArrayGeneric): Data for preprocessing. ndim = 2 (n_samples, n_features).
bins (int): Number of bins - unique values.
binning_type (Literal["cut", "qcut"], optional): "cut" for equal size of bins intervals (different number of members in bins)
or "qcut" for equal number of members in bins and various size of bins. It uses pandas cut
or qcut function. Defaults to "cut".
Returns:
DataFrameOrArrayGeneric: Discretized data of same type as input. If input in numpy
array, then also output in array, if dataframe input, then dataframe output.
Example:
>>> import mydatapreprocessing.preprocessing as mdpp
...
>>> mdpp.binning(np.array(range(10)), bins=3, binning_type="cut")
array([[1.4955],
[1.4955],
[1.4955],
[1.4955],
[4.5 ],
[4.5 ],
[4.5 ],
[7.5 ],
[7.5 ],
[7.5 ]])
"""
convert_to_array = isinstance(data, np.ndarray)
data = | pd.DataFrame(data) | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Experiments consist of a
# - workload
# - data generator
# - query generator
# - oracle
# - sketches
#
# Running an experiment will generate a csv containing the experimental results
# This can be fed into a function/script/notebook to process and plot the results
#
# For an existing problem and new, experimental sketch, one just needs to create a sketch that wraps the
# implementation with the simple API used in the test.
#
# For a novel problem, one must also write a query generator and an oracle that will compute the correct answer and calculate the error
# when comparing to a sketch's answer.
#
#
# Replicates in an experiment come from
# - reordering / generating new data sequences (runs)
# - randomization at the sketch level (repetitions)
# To make things parallelizable we
# - For each run, write a workload's data sequence to disk (unless it's just drawing from a distribution)
# - Compute the oracle's answers for that sequence and cache it on disk
# - Create a bunch of jobs to evaluate a sketch on the data sequence using the cached answers
#
# Since our jobs contain nested class instances, python's default pickle doesn't work.
# So we use dill to pickle the instances ourselves and unpickle when it's run.
#############################################################################################################################
import os, psutil
import logging
import time
from random import uniform
import pandas as pd
import numpy as np
from enum import Enum
import copy
import dill # needed to pickle classes/functions for use in multiprocessing
from collections import deque
import importlib
from concurrent.futures import ProcessPoolExecutor
from experiment_utils import makeDict
############
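# Illustrative sketch (added for clarity; the helper names and the callable `job` objects are
# hypothetical, not part of this module) of the dill pattern described above: each job is pickled
# with dill, the bytes are shipped to a worker process, and the job is unpickled and run there,
# because the jobs contain nested class instances that the default pickle cannot handle. It relies
# on the dill and ProcessPoolExecutor imports above.
def _run_pickled_job(job_bytes):
    job = dill.loads(job_bytes)  # unpickle the job inside the worker process
    return job()
def _submit_pickled_jobs(jobs):
    with ProcessPoolExecutor() as pool:
        futures = [pool.submit(_run_pickled_job, dill.dumps(job)) for job in jobs]
        return [future.result() for future in futures]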
class ResultAccumulator:
def __init__(self, save_answers=False):
self.results = []
self.timings = []
self.save_answers = save_answers
def addResult(self, sketch, error, answer, truth, query, workload):
if not self.save_answers:
answer = None
query_info = query.info()
sketch_info = sketch.info()
workload_info = workload.info()
result = makeDict(error=error, answer=answer, truth=truth,
**query_info, **workload_info, **sketch_info)
#qid=query.qid, **workload_info, **sketch_info)
self.results.append(result)
def extend(self, results: list):
self.results.extend(results)
def merge(self, results):
self.results.extend(results.results)
self.timings.extend(results.timings)
def toDataFrame(self):
df = | pd.DataFrame(self.results) | pandas.DataFrame |
#%%
import os
import xml
import heapq
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from shapely import wkt
import geopandas as gpd
from xml.dom import minidom
from collections import deque
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from shapely.geometry import Point, LineString, box
from utils.classes import Digraph
from utils.pickle_helper import PickleSaver
from utils.log_helper import LogHelper, logbook
from utils.interval_helper import merge_intervals
from coords.coordTransfrom_shp import coord_transfer
from utils.geo_helper import gdf_to_geojson, gdf_to_postgis, edge_parallel_offset
from setting import filters as way_filters
from setting import SZ_BBOX, GBA_BBOX, PCL_BBOX, FT_BBOX
warnings.filterwarnings('ignore')
#%%
class Digraph_OSM(Digraph):
def __init__(self,
bbox=None,
xml_fn='../input/futian.xml',
road_info_fn='../input/osm_road_speed.xlsx',
combine_link=True,
reverse_edge=True,
two_way_offeset=True,
logger=None,
upload_to_db='shenzhen',
*args, **kwargs):
assert not(bbox is None and xml_fn is None), "Please define one of the bbox or the xml path."
if bbox is not None:
xml_fn = f"../cache/osm_{'_'.join(map(str, bbox))}.xml"
self.download_map(xml_fn, bbox, True)
self.df_nodes, self.df_edges = self.get_road_network(xml_fn, road_info_fn)
self.node_dis_memo = {}
self.route_planning_memo = {}
self.logger = logger
super().__init__(self.df_edges[['s', 'e', 'dist']].values, self.df_nodes.to_dict(orient='index'), *args, **kwargs)
self.df_edges.set_crs('EPSG:4326', inplace=True)
self.df_nodes.set_crs('EPSG:4326', inplace=True)
if combine_link:
self.df_edges = self.combine_rids()
self.df_edges.reset_index(drop=True, inplace=True)
if reverse_edge:
self.df_edges = self.add_reverse_edge(self.df_edges)
self.df_edges.reset_index(drop=True, inplace=True)
self.df_edges.loc[:, 'eid'] = self.df_edges.index
if combine_link or reverse_edge:
# self.df_nodes = self.df_nodes.loc[ np.unique(np.hstack((self.df_edges.s.values, self.df_edges.e.values))),:]
super().__init__(self.df_edges[['s', 'e', 'dist']].values, self.df_nodes.to_dict(orient='index'), *args, **kwargs)
if two_way_offeset:
self.df_edges = self.edge_offset()
order_atts = ['eid', 'rid', 'name', 's', 'e', 'order', 'road_type', 'dir', 'lanes', 'dist', 'oneway', 'is_ring', 'geometry', 'geom_origin']
self.df_edges = self.df_edges[order_atts]
def download_map(self, fn, bbox, verbose=False):
"""Download OSM map of bbox from Internet.
Args:
fn (function): [description]
bbox ([type]): [description]
verbose (bool, optional): [description]. Defaults to False.
"""
if os.path.exists(fn):
return
if verbose:
print("Downloading {}".format(fn))
if isinstance(bbox, (list, np.ndarray)):
bbox = ",".join(map(str, bbox))
import requests
url = f'http://overpass-api.de/api/map?bbox={bbox}'
r = requests.get(url, stream=True)
with open(fn, 'wb') as ofile:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
ofile.write(chunk)
if verbose:
print("Downloaded success.\n")
return True
def get_road_network(self,
fn,
fn_road,
in_sys='wgs',
out_sys='wgs',
signals=True,
road_type_filter=way_filters['auto']['highway'],
keep_cols=['name', 'rid', 'order', 'road_type', 'lanes', 's', 'e', 'dist', 'oneway', 'maxspeed', 'geometry']
):
dom = xml.dom.minidom.parse(fn)
root = dom.documentElement
nodelist = root.getElementsByTagName('node')
waylist = root.getElementsByTagName('way')
# nodes
nodes = []
for node in tqdm(nodelist, 'Parse nodes: \t'):
pid = node.getAttribute('id')
taglist = node.getElementsByTagName('tag')
info = {'pid': int(pid),
'y':float(node.getAttribute('lat')),
'x':float(node.getAttribute('lon'))}
for tag in taglist:
if tag.getAttribute('k') == 'traffic_signals':
info['traffic_signals'] = tag.getAttribute('v')
nodes.append(info)
nodes = gpd.GeoDataFrame(nodes)
nodes.loc[:, 'geometry'] = nodes.apply(lambda i: Point(i.x, i.y), axis=1)
# FIXME "None of ['pid'] are in the columns"
nodes.set_index('pid', inplace=True)
if in_sys != out_sys:
nodes = coord_transfer(nodes, in_sys, out_sys)
nodes.loc[:,['x']], nodes.loc[:,['y']] = nodes.geometry.x, nodes.geometry.y
# traffic_signals
self.traffic_signals = nodes[~nodes.traffic_signals.isna()].index.unique()
# edges
edges = []
for way in tqdm(waylist, 'Parse ways: \t'):
taglist = way.getElementsByTagName('tag')
info = { tag.getAttribute('k'): tag.getAttribute('v') for tag in taglist }
if 'highway' not in info or info['highway'] in road_type_filter:
continue
info['rid'] = int(way.getAttribute('id'))
ndlist = way.getElementsByTagName('nd')
nds = []
for nd in ndlist:
nd_id = nd.getAttribute('ref')
nds.append( nd_id )
for i in range( len(nds)-1 ):
edges.append( { 'order': i, 's':nds[i], 'e':nds[i+1], 'road_type': info['highway'], **info} )
edges = pd.DataFrame( edges )
edges.loc[:, ['s','e']] = pd.concat((edges.s.astype(np.int64), edges.e.astype(np.int64)), axis=1)
edges = edges.merge( nodes[['x','y']], left_on='s', right_index=True ).rename(columns={'x':'x0', 'y':'y0'}) \
.merge( nodes[['x','y']], left_on='e', right_index=True ).rename(columns={'x':'x1', 'y':'y1'})
edges = gpd.GeoDataFrame( edges, geometry = edges.apply( lambda i: LineString( [[i.x0, i.y0], [i.x1, i.y1]] ), axis=1 ) )
edges.loc[:, 'dist'] = edges.apply(lambda i: haversine((i.y0, i.x0), (i.y1, i.x1), unit=Unit.METERS), axis=1)
edges.sort_values(['rid', 'order'], inplace=True)
# nodes filter
ls = np.unique(np.hstack((edges.s.values, edges.e.values)))
nodes = nodes.loc[ls,:]
if fn_road and os.path.exists(fn_road):
road_speed = pd.read_excel(fn_road)[['road_type', 'v']]
edges = edges.merge( road_speed, on ='road_type' )
keep_cols = [i for i in keep_cols if i in edges.columns]
return nodes, edges[keep_cols]
def add_reverse_edge(self, df_edges):
"""Add reverse edge.
Args:
df_edges (gpd.GeoDataFrame): The edge file parsed from OSM.
Check:
rid = 34900355
net.df_edges.query( f"rid == {rid} or rid == -{rid}" ).sort_values(['order','rid'])
"""
def _judge_oneway(oneway_flag):
# https://wiki.openstreetmap.org/wiki/Key:oneway
# reversible, alternating: https://wiki.openstreetmap.org/wiki/Tag:oneway%3Dreversible
if oneway_flag == 'yes' or oneway_flag == '1' or oneway_flag == True:
flag = True
elif oneway_flag == '-1':
flag = True
# way.is_reversed = True
elif oneway_flag == 'no' or oneway_flag == '0' or oneway_flag == False:
flag = False
elif oneway_flag in ['reversible', 'alternating']:
flag = False
else:
flag = False
if self.logger is not None:
self.logger.warning(f'new road type detected at: {oneway_flag}')
return flag
df_edges.oneway = df_edges.oneway.fillna('no').apply(_judge_oneway)
df_edges.loc[:, 'is_ring'] = df_edges.geometry.apply( lambda x: x.is_ring)
df_edge_rev = df_edges.query('oneway == False and not is_ring')
df_edge_rev.loc[:, 'order'] = -df_edge_rev.order - 1
df_edge_rev.loc[:, 'geometry'] = df_edge_rev.geometry.apply( lambda x: LineString(x.coords[::-1]) )
df_edge_rev.rename(columns={'s':'e', 'e':'s'}, inplace=True)
df_edge_rev.loc[:, 'dir'] = -1
df_edges.loc[:, 'dir'] = 1
return df_edges.append(df_edge_rev).reset_index(drop=True)
def get_intermediate_point(self):
"""Identify the road segment with nodes of 1 indegree and 1 outdegree.
Returns:
[list]: Road segement list.
"""
return self.degree.query( "indegree == 1 and outdegree == 1" ).index.unique().tolist()
def combine_links_of_rid(self, rid, omit_rids, df_edges, plot=False, save_folder=None):
"""Combine OSM link.
Args:
rid (int): The id of link in OSM.
omit_rids (df): Subset of df_edges; each start point should meet: 1) it has only 1 indegree and 1 outdegree; 2) it is not a traffic_signals point.
df_edges (df, optional): [description]. Defaults to net.df_edges.
Returns:
pd.DataFrame: The links after combination.
Example:
`new_roads = combine_links_of_rid(rid=25421053, omit_rids=omit_rids, plot=True, save_folder='../cache')`
"""
new_roads = df_edges.query(f"rid == @rid").set_index('order')
combine_orders = omit_rids.query(f"rid == @rid").order.values
combine_seg_indxs = merge_intervals([[x-1, x] for x in combine_orders if x > 0])
drop_index = []
for start, end, _ in combine_seg_indxs:
segs = new_roads.query(f"{start} <= order <= {end}")
pids = np.append(segs.s.values, segs.iloc[-1]['e'])
new_roads.loc[start, 'geometry'] = LineString([[self.node[p]['x'], self.node[p]['y']] for p in pids])
new_roads.loc[start, 'dist'] = segs.dist.sum()
new_roads.loc[start, 'e'] = segs.iloc[-1]['e']
drop_index += [ i for i in range(start+1, end+1) ]
new_roads.drop(index=drop_index, inplace=True)
new_roads.reset_index(inplace=True)
if save_folder is not None:
gdf_to_geojson(new_roads, os.path.join(save_folder, f"road_{rid}_after_combination.geojson"))
if plot:
new_roads.plot()
return new_roads
def combine_rids(self, ):
omit_pids = [ x for x in self.get_intermediate_point() if x not in self.traffic_signals ]
omit_records = self.df_edges.query( f"s in @omit_pids" )
omit_rids = omit_records.rid.unique().tolist()
keep_records = self.df_edges.query( f"rid not in @omit_rids" )
res = []
for rid in tqdm(omit_rids, 'Combine links: \t'):
res.append(self.combine_links_of_rid(rid, omit_records, self.df_edges))
comb_rids = gpd.GeoDataFrame( | pd.concat(res) | pandas.concat |
import pandas as pd
df = | pd.read_csv('data.csv', delimiter=',') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Data analysis for golf courses.
"""
# Import modules
import geopandas as gpd
import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
from scipy import stats
# Define path to data
path = '/Users/jryan4/Dropbox (University of Oregon)/Parks_and_Golf/data/'
# Define save path
savepath = '/Users/jryan4/Dropbox (University of Oregon)/Parks_and_Golf/figures/'
# Define save data path
datapath = '/Users/jryan4/Dropbox (University of Oregon)/Parks_and_Golf/repo/'
###############################################################################
# Number of golf courses in GolfNationwide.com directory
###############################################################################
database_list = glob.glob(path + 'golfnationwide/*.csv')
df_list = []
for infile in database_list:
# Read file
data = | pd.read_csv(infile) | pandas.read_csv |
import pandas as pd
import numpy as np
import re
from tqdm.notebook import tqdm
import random
import sklearn.metrics
from sklearn.pipeline import Pipeline
# For XGBoost Regression and Classification
import xgboost as xgb
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, cross_val_score, KFold
from sklearn.metrics import mean_squared_error, f1_score, r2_score, mean_absolute_error
import catboost
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
import lightgbm
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import VotingRegressor
class ModelsParameters:
def __init__(self, dictionary_params):
self.dictionary_params = dictionary_params
""" I need this function for having all keys for the coming functions """
## Functions for creating a dictionary by simply inputting values of the params for
## each type of estimator
# Create dictionary with all params of RandomForest
def random_forest_params(
n_estimators=[100], # The number of trees in the forest.
criterion=['mse'], # {“mse”, “mae”}, default=”mse”. The function to measure the quality of a split.
max_depth=[None], # The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples
min_samples_split=[2], # The minimum number of samples required to split an internal node
min_samples_leaf=[1], # The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf=[0.0], # The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node
max_features=['auto'], # {“auto”, “sqrt”, “log2”}, int or float, default=”auto” The number of features to consider when looking for the best split. If auto, == n_features [if not so many]
max_leaf_nodes=[None], # pruning? [FIX]
min_impurity_decrease=[0.0],
min_impurity_split=[None],
bootstrap=[True],
oob_score=[False], # whether to use out-of-bag samples to estimate the R^2 on unseen data [should be true? FIX but then it will be less comparable]
n_jobs=[None],
random_state=[None],
verbose=[0],
warm_start=[False],
ccp_alpha=[0.0], # Complexity parameter used for Minimal Cost-Complexity Pruning [FIX]. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen.
max_samples=[None], # bootstrap is True, the number of samples to draw from X to train each base estimator. If None, == to X.shape[0]
):
params_dict = {
'n_estimators': [n_estimators][0],
'criterion': [criterion][0],
'max_depth': [max_depth][0],
'min_samples_split': [min_samples_split][0],
'min_samples_leaf': [min_samples_leaf][0],
'min_weight_fraction_leaf': [min_weight_fraction_leaf][0],
'max_features': [max_features][0],
'max_leaf_nodes': [max_leaf_nodes][0],
'min_impurity_decrease': [min_impurity_decrease][0],
'min_impurity_split': [min_impurity_split][0],
'bootstrap': [bootstrap][0],
'oob_score': [oob_score][0],
'n_jobs': [n_jobs][0],
'random_state': [random_state][0],
'verbose': [verbose][0],
'warm_start': [warm_start][0],
'ccp_alpha': [ccp_alpha][0],
'max_samples': [max_samples][0],
}
return params_dict
def rf_params_pipeline(self, existing_prefix='', prefix_to_add='rf__'):
params_dict = {
prefix_to_add+'n_estimators': self.dictionary_params[existing_prefix+'n_estimators'],
prefix_to_add+'criterion': self.dictionary_params[existing_prefix+'criterion'],
prefix_to_add+'max_depth': self.dictionary_params[existing_prefix+'max_depth'],
prefix_to_add+'min_samples_split': self.dictionary_params[existing_prefix+'min_samples_split'],
prefix_to_add+'min_samples_leaf': self.dictionary_params[existing_prefix+'min_samples_leaf'],
prefix_to_add+'min_weight_fraction_leaf': self.dictionary_params[existing_prefix+'min_weight_fraction_leaf'],
prefix_to_add+'max_features': self.dictionary_params[existing_prefix+'max_features'],
prefix_to_add+'max_leaf_nodes': self.dictionary_params[existing_prefix+'max_leaf_nodes'],
prefix_to_add+'min_impurity_decrease': self.dictionary_params[existing_prefix+'min_impurity_decrease'],
prefix_to_add+'min_impurity_split': self.dictionary_params[existing_prefix+'min_impurity_split'],
prefix_to_add+'bootstrap': self.dictionary_params[existing_prefix+'bootstrap'],
prefix_to_add+'oob_score': self.dictionary_params[existing_prefix+'oob_score'],
prefix_to_add+'n_jobs': self.dictionary_params[existing_prefix+'n_jobs'],
prefix_to_add+'random_state': self.dictionary_params[existing_prefix+'random_state'],
prefix_to_add+'verbose': self.dictionary_params[existing_prefix+'verbose'],
prefix_to_add+'warm_start': self.dictionary_params[existing_prefix+'warm_start'],
prefix_to_add+'ccp_alpha': self.dictionary_params[existing_prefix+'ccp_alpha'],
prefix_to_add+'max_samples': self.dictionary_params[existing_prefix+'max_samples'],
}
return params_dict
def adaboost_params(
base_estimator=[None],
n_estimators=[50],
learning_rate=[1.0],
loss=['linear'],
random_state=[None]
):
params_dict = {
'base_estimator': [base_estimator][0],
'n_estimators': [n_estimators][0],
'learning_rate': [learning_rate][0],
'loss': [loss][0],
'random_state': [random_state][0]
}
return params_dict
def ab_params_pipeline(self, existing_prefix='', prefix_to_add='ab__'):
params_dict = {
prefix_to_add+'base_estimator': self.dictionary_params[existing_prefix+'base_estimator'],
prefix_to_add+'n_estimators': self.dictionary_params[existing_prefix+'n_estimators'],
prefix_to_add+'learning_rate': self.dictionary_params[existing_prefix+'learning_rate'],
prefix_to_add+'loss': self.dictionary_params[existing_prefix+'loss'],
prefix_to_add+'random_state': self.dictionary_params[existing_prefix+'random_state'],
}
return params_dict
def gradientboost_params(
loss=['ls'],
learning_rate=[0.1],
n_estimators=[100],
subsample=[1.0],
criterion=['friedman_mse'],
min_samples_split=[2],
min_samples_leaf=[1],
min_weight_fraction_leaf=[0.0],
max_depth=[3],
min_impurity_decrease=[0.0],
# min_impurity_split=[None], # deprecated FIX
init=[None],
random_state=[None],
max_features=[None],
alpha=[0.9],
verbose=[0],
max_leaf_nodes=[None],
warm_start=[False],
presort=['deprecated'],
validation_fraction=[0.1],
n_iter_no_change=[None],
tol=[0.0001],
ccp_alpha=[0.0],
):
params_dict = {
'loss': [loss][0],
'learning_rate': [learning_rate][0],
'n_estimators': [n_estimators][0],
'subsample': [subsample][0],
'criterion': [criterion][0],
'min_samples_split': [min_samples_split][0],
'min_samples_leaf': [min_samples_leaf][0],
'min_weight_fraction_leaf': [min_weight_fraction_leaf][0],
'max_depth': [max_depth][0],
'min_impurity_decrease': [min_impurity_decrease][0],
# 'min_impurity_split': [min_impurity_split][0],
'init': [init][0],
'random_state': [random_state][0],
'max_features': [max_features][0],
'alpha': [alpha][0],
'verbose': [verbose][0],
'max_leaf_nodes': [max_leaf_nodes][0],
'warm_start': [warm_start][0],
'presort': [presort][0],
'validation_fraction': [validation_fraction][0],
'n_iter_no_change': [n_iter_no_change][0],
'tol': [tol][0],
'ccp_alpha': [ccp_alpha][0],
}
return params_dict
def gb_params_pipeline(self, existing_prefix='', prefix_to_add='gb__'):
params_dict = {
prefix_to_add+'loss': self.dictionary_params[existing_prefix+'loss'],
prefix_to_add+'learning_rate': self.dictionary_params[existing_prefix+'learning_rate'],
prefix_to_add+'n_estimators': self.dictionary_params[existing_prefix+'n_estimators'],
prefix_to_add+'subsample': self.dictionary_params[existing_prefix+'subsample'],
prefix_to_add+'criterion': self.dictionary_params[existing_prefix+'criterion'],
prefix_to_add+'min_samples_split': self.dictionary_params[existing_prefix+'min_samples_split'],
prefix_to_add+'min_samples_leaf': self.dictionary_params[existing_prefix+'min_samples_leaf'],
prefix_to_add+'min_weight_fraction_leaf': self.dictionary_params[existing_prefix+'min_weight_fraction_leaf'],
prefix_to_add+'max_depth': self.dictionary_params[existing_prefix+'max_depth'],
prefix_to_add+'min_impurity_decrease': self.dictionary_params[existing_prefix+'min_impurity_decrease'],
# prefix_to_add+'min_impurity_split': self.dictionary_params[existing_prefix+'min_impurity_split'],
prefix_to_add+'init': self.dictionary_params[existing_prefix+'init'],
prefix_to_add+'random_state': self.dictionary_params[existing_prefix+'random_state'],
prefix_to_add+'max_features': self.dictionary_params[existing_prefix+'max_features'],
prefix_to_add+'alpha': self.dictionary_params[existing_prefix+'alpha'],
prefix_to_add+'verbose': self.dictionary_params[existing_prefix+'verbose'],
prefix_to_add+'max_leaf_nodes': self.dictionary_params[existing_prefix+'max_leaf_nodes'],
prefix_to_add+'warm_start': self.dictionary_params[existing_prefix+'warm_start'],
prefix_to_add+'presort': self.dictionary_params[existing_prefix+'presort'],
prefix_to_add+'validation_fraction': self.dictionary_params[existing_prefix+'validation_fraction'],
prefix_to_add+'n_iter_no_change': self.dictionary_params[existing_prefix+'n_iter_no_change'],
prefix_to_add+'tol': self.dictionary_params[existing_prefix+'tol'],
prefix_to_add+'ccp_alpha': self.dictionary_params[existing_prefix+'ccp_alpha'],
}
return params_dict
# XGBoost
def xgb_params(
objective=['reg:squarederror'],
n_estimators=[100],
max_depth=[10],
learning_rate=[0.3],
verbosity=[0],
booster=[None], # 'gbtree'
tree_method=['auto'],
n_jobs=[1],
gamma=[0],
min_child_weight=[None],
max_delta_step=[None],
subsample=[None],
colsample_bytree=[None],
colsample_bylevel=[None],
colsample_bynode=[None],
reg_alpha=[0],
reg_lambda=[0],
scale_pos_weight=[None],
base_score=[None],
random_state=[random.randint(0, 500)],
missing=[np.nan],
num_parallel_tree=[None],
monotone_constraints=[None],
interaction_constraints=[None],
importance_type=['gain']
):
params_dict = {
'objective': [objective][0],
'n_estimators': [n_estimators][0],
'max_depth': [max_depth][0],
'learning_rate': [learning_rate][0],
'verbosity': [verbosity][0],
'booster': [booster][0],
'tree_method': [tree_method][0],
'n_jobs': [n_jobs][0],
'gamma': [gamma][0],
'min_child_weight': [min_child_weight][0],
'max_delta_step': [max_delta_step][0],
'subsample': [subsample][0],
'colsample_bytree': [colsample_bytree][0],
'colsample_bylevel': [colsample_bylevel][0],
'colsample_bynode': [colsample_bynode][0],
'reg_alpha': [reg_alpha][0],
'reg_lambda': [reg_lambda][0],
'scale_pos_weight': [scale_pos_weight][0],
'base_score': [base_score][0],
'random_state': [random_state][0],
'missing': [missing][0],
'num_parallel_tree': [num_parallel_tree][0],
'monotone_constraints': [monotone_constraints][0],
'interaction_constraints': [interaction_constraints][0],
'importance_type': [importance_type][0]
}
return params_dict
def xgb_params_pipeline(self, existing_prefix='', prefix_to_add='xgb__'):
params_dict = {
prefix_to_add+'objective': self.dictionary_params[existing_prefix+'objective'],
prefix_to_add+'n_estimators': self.dictionary_params[existing_prefix+'n_estimators'],
prefix_to_add+'max_depth': self.dictionary_params[existing_prefix+'max_depth'],
prefix_to_add+'learning_rate': self.dictionary_params[existing_prefix+'learning_rate'],
prefix_to_add+'verbosity': self.dictionary_params[existing_prefix+'verbosity'],
prefix_to_add+'booster': self.dictionary_params[existing_prefix+'booster'],
prefix_to_add+'tree_method': self.dictionary_params[existing_prefix+'tree_method'],
prefix_to_add+'n_jobs': self.dictionary_params[existing_prefix+'n_jobs'],
prefix_to_add+'gamma': self.dictionary_params[existing_prefix+'gamma'],
prefix_to_add+'min_child_weight': self.dictionary_params[existing_prefix+'min_child_weight'],
prefix_to_add+'max_delta_step': self.dictionary_params[existing_prefix+'max_delta_step'],
prefix_to_add+'subsample': self.dictionary_params[existing_prefix+'subsample'],
prefix_to_add+'colsample_bytree': self.dictionary_params[existing_prefix+'colsample_bytree'],
prefix_to_add+'colsample_bylevel': self.dictionary_params[existing_prefix+'colsample_bylevel'],
prefix_to_add+'colsample_bynode': self.dictionary_params[existing_prefix+'colsample_bynode'],
prefix_to_add+'reg_alpha': self.dictionary_params[existing_prefix+'reg_alpha'],
prefix_to_add+'reg_lambda': self.dictionary_params[existing_prefix+'reg_lambda'],
prefix_to_add+'scale_pos_weight': self.dictionary_params[existing_prefix+'scale_pos_weight'],
prefix_to_add+'base_score': self.dictionary_params[existing_prefix+'base_score'],
prefix_to_add+'random_state': self.dictionary_params[existing_prefix+'random_state'],
prefix_to_add+'missing': self.dictionary_params[existing_prefix+'missing'],
prefix_to_add+'num_parallel_tree': self.dictionary_params[existing_prefix+'num_parallel_tree'],
prefix_to_add+'monotone_constraints': self.dictionary_params[existing_prefix+'monotone_constraints'],
prefix_to_add+'interaction_constraints': self.dictionary_params[existing_prefix+'interaction_constraints'],
prefix_to_add+'importance_type': self.dictionary_params[existing_prefix+'importance_type'],
}
return params_dict
# Greedy search?
def create_spaces(self, prefix_pipeline, estimator_name):
df = pd.DataFrame(data=[self.dictionary_params])
params_range = {}
for col in df.columns:
number = 0
string = 0 # not needed so far
nones = 0
trees = 0
string_key = str(col)
for i in df[col][0]:
type_i = type(i)
if (type_i == int) | (type_i == float):
number += 1
elif type_i == str: # not needed
string += 1
elif i == None: # not needed?
nones += 1
elif (type_i == DecisionTreeRegressor):
trees += 1
# Ranges for simple numeric values - FIX check upon them
if (number == len(df)) & (col != prefix_pipeline+'verbose') & \
(col != (prefix_pipeline+'random_state')) & (col != (prefix_pipeline+'verbosity')) \
& (col != (prefix_pipeline+'n_jobs')) & (trees == 0) & + (col != (prefix_pipeline+'n_iter_no_change')) & \
(col != (prefix_pipeline+'missing')) & (col != (prefix_pipeline+'validation_fraction')):
output = df[col][0][0]
if estimator_name == 'RandomForest':
range_output, lower_output, upper_output = ModelsParameters.rf_ranges(self, col, prefix_pipeline, output)
elif estimator_name == 'AdaBoost':
range_output, lower_output, upper_output = ModelsParameters.ab_ranges(self, col, prefix_pipeline, output, trees)
elif estimator_name == 'GradientBoosting':
range_output, lower_output, upper_output = ModelsParameters.gb_ranges(self, col, prefix_pipeline, output)
elif estimator_name == 'XGBoost':
range_output, lower_output, upper_output = ModelsParameters.xgb_ranges(self, col, prefix_pipeline, output)
# Further Conditions on the allowed output range and append
data_to_append = ModelsParameters.create_outputs(self, output, range_output, string_key, lower_output, upper_output)
params_range.update(data_to_append)
# Special Range for AdaBoost trees' max_depth
elif (trees > 0):
data_to_append = ModelsParameters.range_ab_decision_tree(df, self.dictionary_params, col, prefix_pipeline)
params_range.update(data_to_append)
# Else cases - just repeat the same value
else:
data_to_append = {string_key: [i]}
params_range.update(data_to_append)
return params_range
def rf_ranges(self, col, prefix_pipeline, output):
if col == prefix_pipeline+'n_estimators':
range_output = 5
elif col == prefix_pipeline+'max_depth':
range_output = 3
elif col == prefix_pipeline+'min_samples_split':
range_output = 2
elif col == prefix_pipeline+'min_samples_leaf':
range_output = 1
elif col == prefix_pipeline+'min_weight_fraction_leaf':
range_output = 0.05
elif col == prefix_pipeline+'max_features':
range_output = 0
elif col == prefix_pipeline+'max_leaf_nodes':
range_output = 0
elif col == prefix_pipeline+'min_impurity_decrease':
range_output = 0.2
elif col == prefix_pipeline+'ccp_alpha':
range_output = 0.2
elif col == prefix_pipeline+'max_samples':
range_output = 0
lower_output = output - range_output
upper_output = output + range_output
return range_output, lower_output, upper_output
def ab_ranges(self, col, prefix_pipeline, output, trees):
# FIX later: for not needed, thinking of merging with the estimator for tree
if trees == 0:
if col == prefix_pipeline+'n_estimators':
range_output = 5
elif col == prefix_pipeline+'learning_rate':
range_output = 0.01 # FIX: is learning rate max == 1?
else:
pass
lower_output = output - range_output
upper_output = output + range_output
return range_output, lower_output, upper_output
def range_ab_decision_tree(df, start_params, col, prefix_pipeline): # For AdaBoost: range of base_estimator max_depth
tree = df[col][0][0] # not needed
for i in start_params[col]:
x = re.split("\=", str(i))
y = re.split("\)", str(x[1]))[0]
max_depth = int(str(y))
output = sklearn.tree.DecisionTreeRegressor(max_depth=max_depth)
if col == prefix_pipeline+'base_estimator':
range_output = 3
lower_output = max_depth - range_output
upper_output = max_depth + range_output
if (range_output != 0) & (lower_output > 0):
data_to_append = {str(col): [
sklearn.tree.DecisionTreeRegressor(max_depth=lower_output),
output,
sklearn.tree.DecisionTreeRegressor(max_depth=upper_output)
]}
elif (range_output != 0) & (lower_output <= 0):
data_to_append = {str(col): [
output,
sklearn.tree.DecisionTreeRegressor(max_depth=upper_output)
]}
elif (range_output == 0):
data_to_append = {str(col): [
output
]}
return data_to_append
def gb_ranges(self, col, prefix_pipeline, output):
if col == prefix_pipeline+'learning_rate':
range_output = 0 # FIX: is learning rate max == 1?
elif col == prefix_pipeline+'n_estimators':
range_output = 5
elif col == prefix_pipeline+'subsample':
range_output = 0
elif col == prefix_pipeline+'min_samples_split':
range_output = 2
elif col == prefix_pipeline+'min_samples_leaf':
range_output = 1
elif col == prefix_pipeline+'min_weight_fraction_leaf':
range_output = 0.05
elif col == prefix_pipeline+'max_depth':
range_output = 3
elif col == prefix_pipeline+'min_impurity_decrease':
range_output = 0.2
elif col == prefix_pipeline+'max_features':
range_output = 0
elif col == prefix_pipeline+'alpha':
range_output = 0
elif col == prefix_pipeline+'max_leaf_nodes':
range_output = 0
elif col == prefix_pipeline+'tol':
range_output = 0
elif col == prefix_pipeline+'ccp_alpha':
range_output = 0
lower_output = output - range_output
upper_output = output + range_output
return range_output, lower_output, upper_output
def xgb_ranges(self, col, prefix_pipeline, output):
if col == prefix_pipeline+'n_estimators':
range_output = 5
elif col == prefix_pipeline+'max_depth':
range_output = 3
elif col == prefix_pipeline+'learning_rate':
range_output = 0 # FIX: is learning rate max == 1?
elif col == prefix_pipeline+'gamma':
range_output = 0
elif col == prefix_pipeline+'min_child_weight':
range_output = 0
elif col == prefix_pipeline+'max_delta_stop':
range_output = 0
elif col == prefix_pipeline+'subsample':
range_output = 0
elif col == prefix_pipeline+'colsample_bytree':
range_output = 0
elif col == prefix_pipeline+'colsample_bylevel':
range_output = 0
elif col == prefix_pipeline+'colsample_bynode':
range_output = 0
elif col == prefix_pipeline+'reg_alpha':
range_output = 0
elif col == prefix_pipeline+'reg_lambda':
range_output = 0
elif col == prefix_pipeline+'scale_pos_weight':
range_output = 0
elif col == prefix_pipeline+'base_score':
range_output = 0
elif col == prefix_pipeline+'monotone_constraints':
range_output = 0
elif col == prefix_pipeline+'interaction_constraints':
range_output = 0
lower_output = output - range_output
upper_output = output + range_output
return range_output, lower_output, upper_output
##
def create_outputs(self, output, range_output, string_key, lower_output, upper_output):
if range_output == 0:
data_to_append = {string_key: [
output
]}
elif (range_output != 0) & (lower_output > 0):
data_to_append = {string_key: [
lower_output,
output,
upper_output
]}
# FIX could be controversial in certain instances in case you want lower bound to be 0
elif (range_output != 0) & (lower_output == 0):
data_to_append = {string_key: [
output,
upper_output
]}
elif (lower_output < 0) & (output != 0):
data_to_append = {string_key: [
0,
output,
upper_output]}
elif (lower_output < 0) & (output == 0):
data_to_append = {string_key: [
output,
upper_output]}
return data_to_append
def best_model_pipeline(X, Y, pipeline, params_range, cv, scoring='neg_mean_squared_error'):
optimal_model = GridSearchCV(pipeline,
params_range,
scoring=scoring,
cv=cv,
refit=True) # when there is a list in scoring, it needs an explicit one. NP because here comes "s", not "scoring"
print('Below are the params_range')
print(params_range)
result = optimal_model.fit(X, Y)
best_params = result.best_estimator_ # result.best_params_ needed when refit=False
dict_parameters_pipeline = {}
for param in params_range: # list of parameters
dict_parameters_pipeline[str(param)] = [best_params.get_params()[str(param)]]
print('Below are the best params')
print(dict_parameters_pipeline)
return result, dict_parameters_pipeline
##
def NestedCV(X, Y, params, pipeline, prefix_pipeline=None, estimator=None, estimator_name=None,
NUM_TRIALS=1, # for repeated. Note that the sample is anew every time CHECK
inner_n_splits=5,
outer_n_splits=5,
adaptive_grid='yes',
scoring=['neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error'],
):
best_params = pd.DataFrame()
df_feature_importance = pd.DataFrame()
mse = list()
mae = list()
rmse = list()
score_list = list()
score_metric = list()
# PROPOSAL: evaluate with the metrics listed. [BLOCK for metrics]
## WRONG FIX. It is just a different metric at the end. Shouldn't be in a loop like this
for s in scoring:
# PROPOSAL: nested CV
for i in tqdm(range(NUM_TRIALS)):
# configure the cross-validation procedure
cv_outer = KFold(n_splits=outer_n_splits, shuffle=True, random_state=i)
# [BLOCK for nested CV?
for train_ix, test_ix in cv_outer.split(X):
# split data
X_train, X_test = X.iloc[train_ix, :], X.iloc[test_ix, :]
Y_train, Y_test = Y.iloc[train_ix], Y.iloc[test_ix]
# configure the cross-validation procedure
cv_inner = KFold(n_splits=inner_n_splits,
shuffle=True,
random_state=i)
if (pipeline==None) & (adaptive_grid=='yes'):
params = ModelsParameters(params).create_spaces(prefix_pipeline='', estimator_name=estimator_name) # create range of params for the grid search
result, best_params_grid = ModelsParameters.best_model_pipeline(X_train, Y_train,
pipeline=estimator,
params_range=params,
scoring=s, cv=cv_inner)
#print(best_params_grid)
print('done without pipeline')
# FIX test this version
feature_importances = pd.DataFrame(result.best_estimator_.feature_importances_.reshape(1, -1),
columns=X_train.columns)
df_feature_importance = df_feature_importance.append(
feature_importances, ignore_index=True)
# FIX features importance of the best model. Excluded with pipeline because there are no
elif (pipeline!=None) & (adaptive_grid=='yes'):
params = ModelsParameters(params).create_spaces(prefix_pipeline=prefix_pipeline, estimator_name=estimator_name) # create range RF params in format for pipeline
result, best_params_grid = ModelsParameters.best_model_pipeline(X_train, Y_train,
pipeline=pipeline,
params_range=params,
scoring=s, cv=cv_inner)
for value in best_params_grid:
data_to_update = {str(value): best_params_grid[value]}
params.update(data_to_update)
#print(best_params_grid)
print('done one with pipeline')
elif (pipeline!=None) & (adaptive_grid=='no'):
result, best_params_grid = ModelsParameters.best_model_pipeline(X_train, Y_train,
pipeline=pipeline,
params_range=params,
scoring=s, cv=cv_inner)
#print(best_params_grid)
print('done one with pipeline with no adaptive grid')
elif (pipeline==None) & (adaptive_grid=='no'):
result, best_params_grid = ModelsParameters.best_model_pipeline(X_train, Y_train,
pipeline=estimator,
params_range=params,
scoring=s, cv=cv_inner)
#print(best_params_grid)
print('done without pipeline and without adaptive grid')
feature_importances = pd.DataFrame(result.best_estimator_.feature_importances_.reshape(1, -1),
columns=X_train.columns)
df_feature_importance = df_feature_importance.append(
feature_importances, ignore_index=True)
# PROPOSAL: END function for metrics
# evaluate model on the hold out dataset (i.e. the test set)
# FIX the score_ variables names
if s == 'neg_mean_squared_error':
#best_model.fit(X_train, Y_train)
#score = best_model.score(X_test, Y_test) # for using pipeline?
score = mean_squared_error(Y_test, result.predict(X_test)) # result.predict(X_test): needed when refit=False
score_list.append(score) # which quickest? FIX
score_metric.append('MSE')
print('MSE=%.3f, est=%.3f' % (score, -result.best_score_))
elif s == 'neg_mean_absolute_error':
score = mean_absolute_error(Y_test, result.predict(X_test))
score_list.append(score)
score_metric.append('MAE')
print('MAE=%.3f, est=%.3f' % (score, -result.best_score_))
elif s == 'neg_root_mean_squared_error':
score = mean_squared_error(Y_test, result.predict(X_test), squared=False)
score_list.append(score)
score_metric.append('RMSE')
print('RMSE=%.3f, est=%.3f' % (score, -result.best_score_))
# best parameters
best_params = best_params.append(best_params_grid,
ignore_index=True)
# [BLOCK for proper output? or just not?
best_params['Score Value'] = score_list
best_params['Metric'] = score_metric
return best_params, df_feature_importance
""" FIND overall metrics/hyperparameters as sum/count/.. """
def find_best_params(best_params):
df = pd.DataFrame()
# Pretransform the format of the params dataframe
for col in best_params.loc[:, (best_params.columns != 'Score Value') & (best_params.columns != 'Metric')]: # & (best_params.columns != 'RMSE')]:
new_list = []
for i in range(len(best_params)):
new_item = best_params[str(col)][i][0]
new_list.append(new_item)
best_params[str(col)] = new_list
for metric in best_params['Metric'].unique():
data = {}
selected_metric_df = best_params.loc[best_params['Metric']==metric]
for col in selected_metric_df.columns:
number = 0
string = 0
nones = 0
trees = 0
string_col = str(col)
for i in selected_metric_df[col]:
type_i = type(i)
if (type_i == int) | (type_i == float):
number += 1
elif type_i == str:
string += 1
elif i == None:
nones += 1
elif (type_i == DecisionTreeRegressor): # for Adaboost
trees += 1
# If the params are numbers, it reports the mean
if (number == len(selected_metric_df[col])) & (nones == 0):
integer = 0
for i in selected_metric_df[col]:
type_i = type(i)
if (type_i == int):
output = int(selected_metric_df[col].mean())
data_to_append = {string_col: [output]}
data.update(data_to_append)
elif (type_i == float):
output = float(selected_metric_df[col].mean())
data_to_append = {string_col: [output]}
data.update(data_to_append)
# In case of string params, the most frequent one
elif (string == len(selected_metric_df[col])) & (nones == 0):
output = selected_metric_df[col].value_counts().reset_index()['index'][0]
data_to_append = {string_col: [output]}
data.update(data_to_append)
# In case of all Nones values, the value will be None
elif nones == len(selected_metric_df[col]):
data_to_append = {string_col: [None]}
data.update(data_to_append)
# In case some are Nones and other not, see below FIX
elif (nones > 0) & (nones != len(selected_metric_df[col])):
if (type(i) == int for i in selected_metric_df[col]):
if ((selected_metric_df[col].value_counts().sum() >= len(selected_metric_df)/2) == True):
output = int(selected_metric_df[col].value_counts().reset_index()['index'][0])
data_to_append = {string_col: [output]}
data.update(data_to_append)
else:
data_to_append = {string_col: [None]}
data.update(data_to_append)
elif (type(i) == float for i in selected_metric_df[col]):
if ((selected_metric_df[col].value_counts().sum() >= len(selected_metric_df)/2) == True):
output = float(selected_metric_df[col].value_counts().reset_index()['index'][0])
data_to_append = {string_col: [output]}
data.update(data_to_append)
else:
data_to_append = {string_col: [None]}
data.update(data_to_append)
df_partial = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
from netCDF4 import Dataset
from PolutantsTable import PolutantsTable as pt
class DataManager:
# originalDF contains the dataframe with the data downloaded from the XVPCA
# Ex: data/AirQualityData/QualitatAire2016TotCatalunya2016.csv
originalDF = pd.DataFrame()
'''
dictionary that stores a dataframe for each pollutant in the originalDF
'''
by_polutant_dataframes = {}
wrf_files_path = 'D:\\URBAG\\Simulation\\'
def load_original_file(self, original_file):
'''
reads a CSV file in the XVPCA format where the column DATA is parsed as datetime
'''
dateparse = lambda x: | pd.datetime.strptime(x, '%d/%m/%Y') | pandas.datetime.strptime |
import folium
import pandas
df = | pandas.read_csv('oco.csv', delimiter='~') | pandas.read_csv |
# Loading Python libraries
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.api as sm
import statsmodels.stats.multicomp as multi
from statsmodels.formula.api import ols
from IPython.display import Markdown
#%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
#sns.set()
#pd.options.display.float_format = '{:.3f}'.format
#np.set_printoptions(precision=3, suppress=True)
# Statistics functions
def parammct(data=None, independent=None, dependent=None):
independent = str(independent)
dependent = str(dependent)
if input_check_numerical_categorical(data, independent, dependent):
return
parammct_df = pd.DataFrame()
for value in pd.unique(data[independent]):
mean = data[dependent][data[independent]==value].mean()
stdev = data[dependent][data[independent]==value].std()
n = data[dependent][data[independent]==value].count()
sdemean = stdev/np.sqrt(n)
ci = 1.96*sdemean
lowerboundci = mean-ci
upperboundci = mean+ci
parammct_df[value] = pd.Series([mean, stdev, n, sdemean, lowerboundci, upperboundci],
index = ['Mean','SD','n','SEM','Lower bound CI', 'Upper bound CI'])
return parammct_df
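# A short usage sketch (added for illustration; the column names and values are made up). parammct
# returns one column per group, with Mean, SD, n, SEM and the 95% CI bounds as rows.
def _example_parammct() -> pd.DataFrame:
    toy = pd.DataFrame({'group': ['a', 'a', 'a', 'b', 'b', 'b'],
                        'score': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
    return parammct(data=toy, independent='group', dependent='score')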
def non_parammct(data=None, independent=None, dependent=None):
independent = str(independent)
dependent = str(dependent)
if input_check_numerical_categorical(data, independent, dependent):
return
non_parammct_df = pd.DataFrame()
for value in pd.unique(data[independent]):
median = data[dependent][data[independent]==value].median()
minimum = data[dependent][data[independent]==value].quantile(0)
q25 = data[dependent][data[independent]==value].quantile(0.25)
q75 = data[dependent][data[independent]==value].quantile(0.75)
maximum = data[dependent][data[independent]==value].quantile(1)
n = data[dependent][data[independent]==value].count()
non_parammct_df[value] = pd.Series([median, minimum, q25,q75, maximum, n],
index = ['Median', 'Minimum', 'Lower bound IQR', 'Upper bound IQR',
'Maximum', 'n'])
return non_parammct_df
def histograms(data=None, independent=None, dependent=None):
independent = str(independent)
dependent = str(dependent)
if input_check_numerical_categorical(data, independent, dependent):
return
for value in pd.unique(data[independent]):
sns.distplot(data[dependent][data[independent]==value], fit=stats.norm, kde=False)
plt.title(dependent + ' by ' + independent + '(' + str(value).lower() + ')',
fontweight='bold', fontsize=16)
plt.ylabel('Frequency', fontsize=14)
plt.xlabel(dependent, fontsize=14)
plt.show()
return
def t_test(data=None, independent=None, dependent=None):
pd.set_eng_float_format(accuracy=3, use_eng_prefix=False)
independent_groups = pd.unique(data[independent])
if len(independent_groups)>2:
print('There are more than 2 groups in the independent variable')
print('t-test is not the correct statistical test to run in that circumstance,')
print('consider running an ANOVA')
return
mct = parammct(data=data, independent=independent, dependent=dependent)
t_test_value, p_value = stats.ttest_ind(data[dependent][data[independent] == independent_groups[0]],
data[dependent][data[independent] == independent_groups[1]])
difference_mean = np.abs(mct.loc['Mean'][0] - mct.loc['Mean'][1])
pooled_sd = np.sqrt( ( ((mct.loc['n'][0]-1)*mct.loc['SD'][0]**2) + ((mct.loc['n'][1]-1)*mct.loc['SD'][1]**2) ) /
(mct.loc['n'][0] + mct.loc['n'][1] - 2) )
sedifference = pooled_sd * np.sqrt( (1/mct.loc['n'][0]) + (1/mct.loc['n'][1]) )
difference_mean_ci1 = difference_mean + (t_test_value * sedifference)
difference_mean_ci2 = difference_mean - (t_test_value * sedifference)
if difference_mean_ci1>difference_mean_ci2:
difference_mean_cilower = difference_mean_ci2
difference_mean_ciupper = difference_mean_ci1
else:
difference_mean_cilower = difference_mean_ci1
difference_mean_ciupper = difference_mean_ci2
cohend = difference_mean / pooled_sd
t_test_result= pd.DataFrame ([difference_mean, sedifference, t_test_value, p_value,
difference_mean_cilower, difference_mean_ciupper, cohend],
index = ['Difference between means', 'SE difference', 't-test', 'p-value',
'Lower bound difference CI', 'Upper bound difference CI', 'Cohen\'s d'],
columns=['Value'])
return t_test_result
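# A hedged usage sketch for t_test (illustrative only; "treatment"/"outcome" are
# assumed column names, and the call is commented out because t_test goes through
# parammct and therefore the same unshown input-check helper):
#
#   example_df = pd.DataFrame({"treatment": ["ctrl"] * 6 + ["drug"] * 6,
#                              "outcome": [5, 6, 7, 5, 6, 7, 8, 9, 10, 8, 9, 10]})
#   t_test(data=example_df, independent="treatment", dependent="outcome")
#
# The result reports the difference between means, its SE and CI, the t statistic,
# the p-value and Cohen's d (difference of means divided by the pooled SD).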
def anova(data=None, independent=None, dependent=None):
pd.set_eng_float_format(accuracy=3, use_eng_prefix=False)
independent = str(independent)
dependent = str(dependent)
if input_check_numerical_categorical(data, independent, dependent):
return
formula = dependent + ' ~ ' + independent
model = ols(formula, data=data).fit()
aov_table = sm.stats.anova_lm(model, typ=2)
aov_table.rename(columns={'PR(>F)':'p'}, inplace=True)
aov_table['F'] = pd.Series([aov_table['F'][0], ''], index = [independent, 'Residual'])
aov_table['p'] = pd.Series([aov_table['p'][0], ''], index = [independent, 'Residual'])
eta_sq = aov_table['sum_sq'][0]/(aov_table['sum_sq'][0]+aov_table['sum_sq'][1])
aov_table['Eta squared'] = pd.Series([eta_sq, ''], index = [independent, 'Residual'])
return aov_table
def tukey(data=None, independent=None, dependent=None):
pd.set_eng_float_format(accuracy=3, use_eng_prefix=False)
independent = str(independent)
dependent = str(dependent)
if input_check_numerical_categorical(data, independent, dependent):
return
test = multi.MultiComparison(data[dependent], data[independent])
res = test.tukeyhsd()
display(res.summary())
res.plot_simultaneous()
return
def chi_square(data=None, variable1=None, variable2=None):
pd.set_eng_float_format(accuracy=3, use_eng_prefix=False)
variable1 = str(variable1)
variable2 = str(variable2)
if input_check_categorical_categorical(data, variable1, variable2):
return
values_var1=pd.unique(data[variable1])
values_var2=pd.unique(data[variable2])
problem_found=False
for variable in [values_var1, values_var2]:
if len(variable)<2:
print(variable, 'has less than two categories. It has:', len(variable))
problem_found=True
if problem_found:
return
contingency_table = pd.crosstab(data[variable1], data[variable2])
contingency_table = pd.DataFrame(contingency_table)
display(Markdown('**Contingency Table**'))
display(contingency_table)
chi2_test=stats.chi2_contingency(contingency_table, correction=False)
chi2_result = pd.Series ([chi2_test[0], chi2_test[1], chi2_test[2], chi2_test[3]],
index = ['Chi-square value', 'p-value', 'Degrees of freedom', 'Expected frequencies'])
chi2_result = | pd.DataFrame(chi2_result, columns=['Value']) | pandas.DataFrame |
#!/usr/bin/env python3
"""
Main file for processing data by age and week
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from io import StringIO
from os.path import join
from sklearn.preprocessing import LabelEncoder
from matplotlib.patches import Patch
from loguru import logger
from typing import Dict, List, Tuple, Any, Union, Optional
from utils.enums import DatasetName, AgeGroup, Sexes
from utils.utils import get_file_path_relative
from utils.variables import path_dict, visualizations_folder, age_key, sex_key, week_key, covid_deaths_key
def to_categorical(input: pd.Series) -> List[Any]:
"""
returns the mapping of int to categorical value, and the categorized pandas series
"""
le = LabelEncoder()
output: np.ndarray = le.fit_transform(input)
le_name_mapping: Tuple[int, str] = tuple(zip(le.transform(le.classes_), le.classes_))
return [le_name_mapping, | pd.Series(output) | pandas.Series |
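# A hedged usage sketch for to_categorical (illustrative only; the toy Series is an
# assumption):
#
#   mapping, codes = to_categorical(pd.Series(["male", "female", "female", "male"]))
#   # mapping -> ((0, 'female'), (1, 'male')); codes -> pd.Series([1, 0, 0, 1])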
"""
Author: <NAME>
Created: 14/08/2020 11:04 AM
"""
import os
import numpy as np
import pandas as pd
from basgra_python import run_basgra_nz, _trans_manual_harv, get_month_day_to_nonleap_doy
from input_output_keys import matrix_weather_keys_pet
from check_basgra_python.support_for_tests import establish_org_input, get_org_correct_values, get_lincoln_broadfield, \
test_dir, establish_peyman_input, _clean_harvest, base_auto_harvest_data, base_manual_harvest_data
from supporting_functions.plotting import plot_multiple_results # used in test development and debugging
verbose = False
drop_keys = [ # newly added keys that must be dropped initially to manage tests, datasets are subsequently re-created
'WAFC',
'IRR_TARG',
'IRR_TRIG',
'IRRIG_DEM',
'RYE_YIELD',
'WEED_YIELD',
'DM_RYE_RM',
'DM_WEED_RM',
'DMH_RYE',
'DMH_WEED',
'DMH',
'WAWP',
'MXPAW',
'PAW',
'RESEEDED',
]
view_keys = [
'WAL',
'WCL',
'DM',
'YIELD',
'BASAL',
'ROOTD',
'IRRIG_DEM',
'HARVFR',
'RYE_YIELD',
'WEED_YIELD',
'DM_RYE_RM',
'DM_WEED_RM',
'DMH_RYE',
'DMH_WEED',
'DMH',
'WAWP', # # mm # Water in non-frozen root zone at wilting point
'MXPAW', # mm # maximum Profile available water
'PAW', # mm Profile available water at the time step
]
def test_trans_manual_harv(update_data=False):
test_nm = 'test_trans_manual_harv'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
np.random.seed(1)
days_harvest.loc[:, 'harv_trig'] = np.random.rand(len(days_harvest))
np.random.seed(2)
days_harvest.loc[:, 'harv_targ'] = np.random.rand(len(days_harvest))
np.random.seed(3)
days_harvest.loc[:, 'weed_dm_frac'] = np.random.rand(len(days_harvest))
out = _trans_manual_harv(days_harvest, matrix_weather)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out, dropable=False)
def _output_checks(out, correct_out, dropable=True):
"""
base checker
:param out: basgra data from current test
:param correct_out: expected basgra data
:param dropable: boolean, if True, can drop output keys, which allows _output_checks to be used for non-BASGRA data and
for new outputs to be dropped when comparing results.
:return:
"""
if dropable:
# should normally be empty, but is here to allow easy checking of old tests against versions with a new output
drop_keys_int = [
]
out2 = out.drop(columns=drop_keys_int)
else:
out2 = out.copy(True)
# check shapes
assert out2.shape == correct_out.shape, 'something is wrong with the output shapes'
# check datatypes
assert issubclass(out.values.dtype.type, np.floating), 'outputs of the model should all be floats'
out2 = out2.values
correct_out2 = correct_out.values
out2[np.isnan(out2)] = -9999.99999
correct_out2[np.isnan(correct_out2)] = -9999.99999
# check values match for sample run
isclose = np.isclose(out2, correct_out2)
asmess = '{} values do not match between the output and correct output with rtol=1e-05, atol=1e-08'.format(
(~isclose).sum())
assert isclose.all(), asmess
print(' model passed test\n')
def test_org_basgra_nz(update_data=False):
print('testing original basgra_nz')
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
# test against my saved version (simply to have all columns
data_path = os.path.join(test_dir, 'test_org_basgra.csv')
if update_data:
out.to_csv(data_path)
print(' testing against full dataset')
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# test against the original data provided by Simon Woodward
out.drop(columns=drop_keys, inplace=True) # remove all of the newly added keys
print(' testing against Simon Woodwards original data')
correct_out2 = get_org_correct_values()
_output_checks(out, correct_out2)
def test_irrigation_trigger(update_data=False):
print('testing irrigation trigger')
# note this is linked to test_leap, so any input changes there should be mapped here
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_irrigation_trigger_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_irrigation_fraction(update_data=False):
print('testing irrigation fraction')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 1
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .60 # irrigation of 60% of what is needed to get to field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_irrigation_fraction_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_water_short(update_data=False):
print('testing water shortage')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 5
matrix_weather.loc[matrix_weather.index > '2015-08-01', 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.8
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_water_short_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_short_season(update_data=False):
print('testing short season')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 1
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 61))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_short_season_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_variable_irr_trig_targ(update_data=False):
print('testing time variable irrigation triggers and targets')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[matrix_weather.index > '2013-08-01', 'irr_trig'] = 0.7
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather.loc[(matrix_weather.index < '2012-08-01'), 'irr_targ'] = 0.8
matrix_weather.loc[(matrix_weather.index > '2015-08-01'), 'irr_targ'] = 0.8
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1
doy_irr = list(range(305, 367)) + list(range(1, 61))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_variable_irr_trig_targ.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_irr_paw(update_data=False):
test_nm = 'test_irr_paw'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 5
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 0.9
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
params['irr_frm_paw'] = 1
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_pet_calculation(update_data=False):
# note this test was not as thoroughly investigated as it was not needed for my work stream
print('testing pet calculation')
params, matrix_weather, days_harvest, doy_irr = establish_peyman_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, dll_path='default',
supply_pet=False)
data_path = os.path.join(test_dir, 'test_pet_calculation.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# Manual Harvest tests
def test_fixed_harvest_man(update_data=False):
test_nm = 'test_fixed_harvest_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 10
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 2000
days_harvest.loc[idx, 'harv_targ'] = 100
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_harv_trig_man(update_data=False):
# test manual harvesting dates with a set trigger, weed fraction set to zero
test_nm = 'test_harv_trig_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 0
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.5
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 2200
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fraction_man(update_data=False):
# test manual harvesting trig set to zero +- target with weed fraction above 0
test_nm = 'test_weed_fraction_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 0
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.5
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 2200
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.5
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 1
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# automatic harvesting tests
def test_auto_harv_trig(update_data=False):
test_nm = 'test_auto_harv_trig'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['opt_harvfrin'] = 1
days_harvest = base_auto_harvest_data(matrix_weather)
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 2000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = | pd.read_csv(data_path, index_col=0) | pandas.read_csv |
from flask import Flask, render_template, request, redirect, url_for,session
import os
from os.path import join, dirname, realpath
from joblib import dump,load
import xgboost
import pandas as pd
import sklearn
import numpy as np
from flask import Flask, render_template, redirect, request, session
app = Flask(__name__)
# enable debugging mode
app.config["DEBUG"] = True
# secret key is needed for session
app.secret_key = 'Munir4638143'
# Upload folder
UPLOAD_FOLDER = 'static/files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Root URL
@app.route('/')
def index():
# Set The upload HTML template '\templates\index.html'
prediction=session.get('prediction')
return render_template('index.html',prediction_text='The provider is {}'.format(prediction))
# Get the uploaded files
@app.route("/", methods=['POST'])
def uploadFiles():
# get the uploaded file
uploaded_inpatient = request.files['inpatient']
if uploaded_inpatient.filename != '':
inpatient_path = os.path.join(app.config['UPLOAD_FOLDER'], uploaded_inpatient.filename)
# set the file path
uploaded_inpatient.save(inpatient_path)
uploaded_outpatient = request.files['outpatient']
if uploaded_outpatient.filename != '':
outpatient_path = os.path.join(app.config['UPLOAD_FOLDER'], uploaded_outpatient.filename)
# set the file path
uploaded_outpatient.save(outpatient_path)
uploaded_benefeciary = request.files['benefeciary']
if uploaded_benefeciary.filename != '':
benefeciary_path = os.path.join(app.config['UPLOAD_FOLDER'], uploaded_benefeciary.filename)
# set the file path
uploaded_benefeciary.save(benefeciary_path)
df_benefeciary=pd.read_csv(benefeciary_path)
df_inpatient=pd.read_csv(inpatient_path)
df_outpatient=pd.read_csv(outpatient_path)
print("\nReaded all csv's.")
print(df_benefeciary.columns)
print(df_inpatient.columns)
print(df_outpatient.columns)
Provider='PRV51001'
dfInpatientProvider=df_inpatient[df_inpatient['Provider']==Provider]
dfOutpatientProvider=df_outpatient[df_outpatient['Provider']==Provider]
## COMBINING df_inpatient AND df_outpatient
df_patient_data=pd.concat([dfInpatientProvider,dfOutpatientProvider])
#joining df_patient_data and df_beneficiary using BeneID
Train_ProviderWithPatient_data=pd.merge(df_patient_data,df_benefeciary,left_on='BeneID',right_on='BeneID',how='inner')
## AGE
# creating a timestamp for date '2009-12-01' to measure the age of live beneficiaries from this date
x = | pd.to_datetime('2009-12-01',format='%Y-%m-%d') | pandas.to_datetime |
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
return self.x, self.y
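# A hedged usage sketch (illustrative only; the sample data and bin count are
# assumptions):
#
#   chart = RelativeFrequencyChart()
#   x, y = chart.get_coordinates(pandas.Series([1, 1, 2, 3, 3, 3]), bins=3)
#   # x holds the bin edges; y sums to 1.0 because every observation is weighted by 1/N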
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.kde.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
if step_size == 'W-SUN':
self.periods = DynamicChart.get_periods(self, frame, step_size) # separates DataFrame to the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0 # cumulative total of defect submission for specific period
for self.period in self.periods:
# checks whether the first day of period is Monday (if not then we change first day to Monday)
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
# check whether the date from new DataFrame is greater than date which is specified in settings
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
# processing of days which are out of full period set
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
self.periods = DynamicChart.get_periods(self, frame, step_size) # DataFrame separation by the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+
datetime.timedelta(days=7))) & (pandas.to_datetime(frame['Created_tr'])
<= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '10D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min()) & (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '3M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= | pandas.to_datetime(frame['Created_tr']) | pandas.to_datetime |
#!/usr/bin/python3
import argparse
import os
import sys
import webbrowser
from datetime import timedelta
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import requests_cache
from plotly import graph_objs as go
from plotly.subplots import make_subplots
from tqdm import tqdm
from finance_benchmark import config
def get_asset_data(assets_ticker, startdate):
"""Retrieve assets data from yahoo finance
Args:
assets_ticker ([str]): list of assets to download
startdate (str): start date
"""
df = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
# ### Import
# In[1]:
from bs4 import BeautifulSoup
import requests
import numpy as np
import pandas as pd
import xgboost
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import *
from IPython.core.display import Image
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.tree import export_graphviz
import io
from sklearn.preprocessing import Imputer
import pydot
from sklearn import preprocessing
import lightgbm as lgb
from scipy.stats import mode
import re
from datetime import datetime
from lightgbm import plot_importance
import warnings
warnings.filterwarnings('ignore')
# ---
# ### Date read
# In[12]:
age_gender_bkts = pd.read_csv("age_gender_bkts.csv")
countries = pd.read_csv("countries.csv")
sessions = pd.read_csv("sessions.csv")
test_users = pd.read_csv("test_users.csv")
train_users_2 = pd.read_csv("train_users_2.csv")
sample_submission_NDF = pd.read_csv("sample_submission_NDF.csv")
merged_sessions = pd.read_csv("merged_sessions.csv")
# ---
# ### Date setting - Base1
# In[13]:
def pre_age_set_data(train_users_2, test_users):
check = pd.concat([train_users_2, test_users], ignore_index=True)
check["first_affiliate_tracked"] = check["first_affiliate_tracked"].replace(np.nan, "untracked")
check["date_account_created"] = pd.to_datetime(check["date_account_created"], format = "%Y-%m-%d")
check["timestamp_first_active"] = pd.to_datetime(check["timestamp_first_active"], format="%Y%m%d%H%M%S")
s_lag = check["timestamp_first_active"] - check["date_account_created"]
check["lag_days"] = s_lag.apply(lambda x : -1 * x.days)
check["lag_seconds"] = s_lag.apply(lambda x : x.seconds)
s_all_check = (check['age'] < 120) & (check['gender'] != '-unknown-')
check['faithless_sign'] = s_all_check.apply(lambda x : 0 if x == True else 1)
pre_age = check.drop("date_first_booking",axis = 1)
pre_age['date_account_created_y'] = pre_age["date_account_created"].apply(lambda x : x.year)
pre_age['date_account_created_m'] = pre_age["date_account_created"].apply(lambda x : x.month)
pre_age['date_account_created_d'] = pre_age["date_account_created"].apply(lambda x : x.day)
pre_age['timestamp_first_active_y'] = pre_age["timestamp_first_active"].apply(lambda x : x.year)
pre_age['timestamp_first_active_m'] = pre_age["timestamp_first_active"].apply(lambda x : x.month)
pre_age['timestamp_first_active_d'] = pre_age["timestamp_first_active"].apply(lambda x : x.day)
pre_age = pre_age.drop("date_account_created" , axis=1)
pre_age = pre_age.drop("timestamp_first_active" , axis=1)
return check, pre_age
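# A hedged worked example of the lag features built above (the dates are assumptions):
# if date_account_created is 2014-01-01 and timestamp_first_active is
# 2014-01-03 12:00:00, then s_lag is 2 days 12:00:00, so lag_days = -2 and
# lag_seconds = 43200. faithless_sign is 0 only when both a plausible age (< 120)
# and a known gender are present, and 1 otherwise.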
# ---
# ### Date setting - Base2
# In[14]:
def pre_age_predict_data(pre_age):
pre_age['age'] = pre_age['age'].fillna(-1)
pre_age_sub = pre_age.filter(items = ['age', 'country_destination','id'])
pre_age_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',
'first_affiliate_tracked', 'first_browser', 'first_device_type',
'language', 'signup_app', 'signup_flow',
'signup_method', 'date_account_created_y', 'date_account_created_m',
'date_account_created_d', 'timestamp_first_active_y',
'timestamp_first_active_m', 'timestamp_first_active_d',"lag_days","lag_seconds",
"faithless_sign"])
pre_age_dum[['date_account_created_y', 'date_account_created_m', 'date_account_created_d', 'timestamp_first_active_y','timestamp_first_active_m', 'timestamp_first_active_d']] = pre_age_dum[['date_account_created_y', 'date_account_created_m', 'date_account_created_d', 'timestamp_first_active_y', 'timestamp_first_active_m', 'timestamp_first_active_d']].astype(str)
pre_age_dum = pd.get_dummies(pre_age_dum)
pre_age_dum_con = pd.concat([pre_age_dum, pre_age_sub], axis=1)
pre_age_dum_con["age"] = pre_age_dum_con["age"].replace(-1, np.nan)
pre_age_mission = pre_age_dum_con[pre_age_dum_con["age"].isna()].reset_index()
pre_age_train = pre_age_dum_con[pre_age_dum_con["age"].notna()].reset_index()
pre_age_mission_test = pre_age_mission.drop("index", axis=1)
pre_age_train_test = pre_age_train.drop("index", axis=1)
pre_age_mission_test_drop = pre_age_mission_test.drop(['id', 'age', 'country_destination'], axis=1)
pre_age_train_test_drop = pre_age_train_test.drop(['id', 'age', 'country_destination'], axis=1)
return pre_age_mission_test, pre_age_train_test, pre_age_mission, pre_age_train, pre_age_mission_test_drop, pre_age_train_test_drop
# In[15]:
def pre_age_predict_data_cat(pre_age_train):
bins = [0, 15, 25, 35, 60, 9999]
labels = ["미성년자", "청년", "중년", "장년", "노년"]
cats = pd.cut(pre_age_train['age'], bins, labels=labels)
cats = pd.DataFrame(cats)
return cats
# ---
# ### Predict gender data setting - Only gender
# In[16]:
def add_gender(pre_age):
pred_gen_data = pd.read_csv("model_gen_lgb.csv")
pre_gen_sub = pre_age.filter(items = ['age', 'country_destination', 'id', 'gender'])
pre_gen_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',
'first_affiliate_tracked', 'first_browser', 'first_device_type',
'language', 'signup_app', 'signup_flow',
'signup_method', 'date_account_created_y', 'date_account_created_m',
'date_account_created_d', 'timestamp_first_active_y',
'timestamp_first_active_m', 'timestamp_first_active_d',"lag_days","lag_seconds",
"faithless_sign"])
pre_gen_dum = pd.get_dummies(pre_gen_dum)
pre_gen_dum_con = pd.concat([pre_gen_dum, pre_gen_sub], axis=1)
pre_gen_dum_con["gender"] = pre_gen_dum_con["gender"].replace(['-unknown-', 'OTHER'], np.nan)
pre_gen_mission = pre_gen_dum_con[pre_gen_dum_con["gender"].isna()].reset_index()
pre_gen_train = pre_gen_dum_con[pre_gen_dum_con["gender"].notna()].reset_index()
pre_gen_mission_test = pre_gen_mission.drop("index", axis=1)
pre_gen_train_test = pre_gen_train.drop("index", axis=1)
pre_gen_mission_test_drop = pre_gen_mission_test.drop(['id', 'age', 'country_destination', "gender"], axis=1)
pre_gen_train_test_drop = pre_gen_train_test.drop(['id', 'age', 'country_destination', "gender"], axis=1)
pre_gen_mission_test_la = pd.concat([pre_gen_mission_test, pred_gen_data], axis=1)
pre_gen_mission_test_la = pre_gen_mission_test_la.drop("gender", axis=1)
pre_gen_mission_test_la = pre_gen_mission_test_la.rename(columns={"0": 'gender'})
last_gen_add = pd.concat([pre_gen_mission_test_la, pre_gen_train_test])
last_gen_add = last_gen_add.filter(items = ["id",'gender'])
return last_gen_add
# ---
# ### Holiday, Weekend, Day of week data setting - Only Holiday
# In[17]:
def holiday(train_users_2, test_users):
def get_holidays(year):
response = requests.get("https://www.timeanddate.com/calendar/custom.html?year="+str(year)+"&country=1&cols=3&df=1&hol=25")
dom = BeautifulSoup(response.content, "html.parser")
trs = dom.select("table.cht.lpad tr")
df = | pd.DataFrame(columns=["date", "holiday"]) | pandas.DataFrame |
# %% Notes
# ------------------------------------------------------------------->>>>>>>>>>
# use this script for the final update of the IDs/names; the replacements are compiled from the senior labmate's list
# os.chdir("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/Manuscript/20220311_My_tables")
# ------------------------------------------------------------------->>>>>>>>>>
# %% imports and settings
from pandarallel import pandarallel
import datar.all as r
from datar import f
import plotnine as p9
import os
import numpy as np
import pandas as pd
import seaborn as sns
import glob
sns.set()
pd.set_option("max_colwidth", 100) # max column width
pd.set_option("display.width", 250) # dataframe display width
pd.set_option("display.max_columns", None) # max number of columns displayed
pd.set_option("display.max_rows", 50) # max number of rows displayed
pandarallel.initialize() # multithreading setup; uses all cores by default (nb_workers=24)
# %% os.chdir
os.chdir(
"/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/TargetSeq/20220305_TargetSeq_bmat_alldone_add_v2"
)
os.listdir()
# %% load table all target
# load target-seq
df = pd.read_csv("table/20220305_DdCBE-target-seq-info.csv.gz")
df.drop("bmat_name", axis=1, inplace=True)
# # 查看treatment
print(df.treatment.value_counts())
# # 看一下na情况
print(df.info())
print(df.isna().sum())
# %% load base index
# TargetSeq ratio of the designated base in each off-target region
df_idx = pd.read_excel(
"./table/20220305_DdCBE_only_one_mut_index_info.xlsx", sheet_name="Sheet1"
)
# print(df_idx)
# df_idx.info()
df_idx = df_idx[["region_id", "ref_base", "relative_pos"]].copy()
df_idx
# %% filter
# %%% filter_drop certain sites
df = df[~(df.region_id == "ND516-share-1")].copy() # ND516-share-1 was not done
df = df[~(df.region_id == "share-new-3")].copy() # share-new-3, a discarded site
df = df[~(df.region_id == "ND6-new-only17")].copy() # ND6-new-only17, a discarded site
# inspect all sites
sorted(df.region_id.value_counts().index.tolist())
# %%% filter_drop certain replications
ls_treatment = [
# off-target validation series 1 (used for the final barplot)
'ND4-Det-Q5U',
'ND4-LIPO2000-Q5U',
'ND5-1-Det-Q5U',
'ND5-1-LIPO2000-Q5U',
'ND6-Det-Q5U',
'ND6-LIPO2000-Q5U',
'ND6-LTX-12-Q5U',
'untreat-Q5U'
]
df = df[df.treatment.map(lambda x: x in ls_treatment)].copy()
sorted(list(set(df.treatment.values)))
# %% fix
# found that rep3 contains one ND5.1-new-only24 sample; it is actually a supplementary experiment replacing rep1
df[df.rep == 'rep3'].region_id.value_counts()
# ND5.1-new-only24 1140
# Name: region_id, dtype: int64
df.loc[df.rep == 'rep3', 'rep'] = 'rep1'
# %% merge all-target table and index table
df_one_base = df_idx.merge(df, how="left", on=["region_id", "ref_base", "relative_pos"])
# check df_one_base
# 135 sites, as expected
print(df_one_base.region_id.value_counts().index.__len__())
# %% calculating
# %%% compute mut_ratio * 100 (as a percentage)
df_one_base["mut_ratio"] = df_one_base.mut_count / df_one_base.total_count * 100
df_one_base
# %%% filter C2T or G2A
df_one_base_filter_base = df_one_base[
((df_one_base.ref_base == "C") & (df_one_base.mut_base == "T"))
| (df_one_base.ref_base == "G") & (df_one_base.mut_base == "A")
].copy()
# %%% filter using cutoff 3 (off-target) and cutoff 0 (on-target)
def filter_cutoff(x):
if (x["cutoff"] == 3) and ("on-target" not in x["region_id"]):
return True
elif (x["cutoff"] == 0) and ("on-target" in x["region_id"]):
return True
else:
return False
df_one_base_filter_base = df_one_base_filter_base[
df_one_base_filter_base.apply(filter_cutoff, axis=1)
].copy()
df_use = df_one_base_filter_base.copy()
# check df_one_base
# 133 sites, as expected, because the on-target sites are counted more than once
print(df_use.region_id.value_counts().index.__len__())
# %%% compute the mean
df_use1 = (
df_use
>> r.group_by(f.region_id, f.treatment)
>> r.summarise(mut_ratio_mean=np.mean(f.mut_ratio))
)
df_use2 = (
df_use
>> r.select(f.region_id, f.treatment, f.rep, f.mut_ratio)
>> r.left_join(df_use1)
)
df_plot = df_use2.copy()
df_plot
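# For reference, a hedged pandas equivalent of the datar pipeline above (kept as a
# comment so the datar result stays the one used downstream; df_plot_alt is a
# hypothetical name):
#
#   df_plot_alt = df_use[["region_id", "treatment", "rep", "mut_ratio"]].copy()
#   df_plot_alt["mut_ratio_mean"] = df_use.groupby(["region_id", "treatment"])["mut_ratio"].transform("mean")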
# %% plot
# color scheme reference: https://blog.csdn.net/black000shirt/article/details/113724245
def off_barplot(df):
fig = (
p9.ggplot()
+ p9.geom_bar(
data=df,
mapping=p9.aes(x="region_id", y="mut_ratio_mean", fill="treatment"),
stat=p9.stats.stat_identity,
position=p9.positions.position_dodge(
width=0.9, preserve="total" # Bar width
),
color="black",
size=0.1,
raster=False,
)
+ p9.geom_point(
data=df,
mapping=p9.aes(
x="region_id",
y="mut_ratio",
# fill="treatment",
group="treatment",
),
# color="black",
position=p9.positions.position_dodge(
width=0.9, preserve="total" # Bar width
),
size=0.2,
)
+ p9.scale_x_discrete(name="Off-target sites")
+ p9.scale_y_continuous(
name="Editing ratio by Targeted deep sequencing (%)",
# breaks=np.arange(0, df.mut_ratio.max(), round(df.mut_ratio.max() / 10, 1)),
labels=lambda L: ["%.1f" % v for v in L],
)
+ p9.coord_flip()
+ p9.theme_classic()
# + p9.scale_fill_brewer(type="qualitative", palette="Set2") # comment this line out when plotting the old share sites, otherwise there are not enough colors
+ p9.ggtitle("Targeted deep sequencing ratio")
)
return fig
def off_barplot_log10(df):
# fix log10
df["mut_ratio"] = np.log10(df.mut_ratio) + 5
df["mut_ratio"] = df.mut_ratio.map(lambda x: x if x > 0 else 0)
df["mut_ratio_mean"] = np.log10(df.mut_ratio_mean) + 5
df["mut_ratio_mean"] = df.mut_ratio_mean.map(lambda x: x if x > 0 else 0)
# plot
fig = (
p9.ggplot()
+ p9.geom_bar(
data=df,
mapping=p9.aes(x="region_id", y="mut_ratio_mean", fill="treatment"),
stat=p9.stats.stat_identity,
position=p9.positions.position_dodge(
width=0.9, preserve="total" # Bar width
),
color="black",
size=0.1,
raster=False,
)
+ p9.geom_point(
data=df,
mapping=p9.aes(
x="region_id",
y="mut_ratio",
# fill="treatment",
group="treatment",
),
# color="black",
position=p9.positions.position_dodge(
width=0.9, preserve="total" # Bar width
),
size=0.2,
)
+ p9.scale_x_discrete(name="Off-target sites")
+ p9.scale_y_continuous(
# fix log10
limits=np.log10([0.00001, 100]) + 5,
breaks=np.log10([0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]) + 5,
labels=[0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100],
name="log10(Editing ratio by Targeted deep sequencing (%))",
)
+ p9.coord_flip()
+ p9.theme_classic()
# + p9.scale_fill_brewer(type="qualitative", palette="Set2") # comment this line out when plotting the old share sites, otherwise there are not enough colors
+ p9.ggtitle("Targeted deep sequencing ratio")
)
return fig
def off_jitterplot(df):
fig = (
p9.ggplot(
data=df,
mapping=p9.aes(x="treatment", y="mut_ratio", fill="treatment"),
)
+ p9.geom_jitter(
**{
"stat": "identity",
"na_rm": False,
"width": 0.1,
"height": 0,
"random_state": None,
}
)
+ p9.geom_boxplot(alpha=0.2)
+ p9.scale_y_continuous(breaks=np.arange(0, df.mut_ratio.max(), 0.5))
+ p9.theme_classic()
)
return fig
# %%% final list
df_mut_ratio = df_use2[['region_id', 'treatment', 'rep', 'mut_ratio']].copy()
df_mut_ratio_mean = df_use1.copy()
ls_nd4 = ([f'ND4-only-{i}' for i in range(1, 11)]
+ [f'ND4-new-only{i}' for i in range(1, 14)]
+ [f'ND516-share-{i}' for i in range(2, 14)]
+ [f'share-new-{i}' for i in [1, 2, 4, 5, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 31]])
ls_nd5_1 = ([f'ND5.1-only-{i}' for i in range(1, 11)]
+ [f'ND5.1-new-only{i}' for i in range(1, 26)]
+ [f'ND516-share-{i}' for i in range(2, 14)]
+ [f'share-new-{i}' for i in [1, ] + list(range(5, 27)) + [28, 29, 30, 31, 32]])
ls_nd6 = ([f'ND6-only-{i}' for i in range(1, 14)]
+ [f'ND6-new-only{i}' for i in range(1, 17)]
+ [f'ND516-share-{i}' for i in range(2, 14)]
+ [f'share-new-{i}' for i in [5, 8, ] + list(range(11, 33))])
# %% processing: group the site names by the editor they belong to
df_plot['belongto'] = None
flt4 = df_plot.apply(lambda x: (x['region_id'] in ls_nd4) & (x['treatment'] in ['ND4-Det-Q5U', 'ND4-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt5_1 = df_plot.apply(lambda x: (x['region_id'] in ls_nd5_1) & (x['treatment'] in ['ND5-1-Det-Q5U', 'ND5-1-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt6 = df_plot.apply(lambda x: (x['region_id'] in ls_nd6) & (x['treatment'] in ['ND6-Det-Q5U', 'ND6-LIPO2000-Q5U', 'ND6-LTX-12-Q5U', 'untreat-Q5U']), axis=1)
df_plot.loc[flt4, 'belongto'] = 'ND4'
df_plot.loc[flt5_1, 'belongto'] = 'ND5.1'
df_plot.loc[flt6, 'belongto'] = 'ND6'
df_mut_ratio['belongto'] = None
flt4 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd4) & (x['treatment'] in ['ND4-Det-Q5U', 'ND4-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt5_1 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd5_1) & (x['treatment'] in ['ND5-1-Det-Q5U', 'ND5-1-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt6 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd6) & (x['treatment'] in ['ND6-Det-Q5U', 'ND6-LIPO2000-Q5U', 'ND6-LTX-12-Q5U', 'untreat-Q5U']), axis=1)
df_mut_ratio.loc[flt4, 'belongto'] = 'ND4'
df_mut_ratio.loc[flt5_1, 'belongto'] = 'ND5.1'
df_mut_ratio.loc[flt6, 'belongto'] = 'ND6'
flt4 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd4) & (x['treatment'] in ['ND4-Det-Q5U', 'ND4-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt5_1 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd5_1) & (x['treatment'] in ['ND5-1-Det-Q5U', 'ND5-1-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt6 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd6) & (x['treatment'] in ['ND6-Det-Q5U', 'ND6-LIPO2000-Q5U', 'ND6-LTX-12-Q5U', 'untreat-Q5U']), axis=1)
df_mut_ratio.loc[flt4, 'belongto'] = 'ND4'
df_mut_ratio.loc[flt5_1, 'belongto'] = 'ND5.1'
df_mut_ratio.loc[flt6, 'belongto'] = 'ND6'
df_mut_ratio_mean['belongto'] = None
flt4 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd4) & (x['treatment'] in ['ND4-Det-Q5U', 'ND4-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt5_1 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd5_1) & (x['treatment'] in ['ND5-1-Det-Q5U', 'ND5-1-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt6 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd6) & (x['treatment'] in ['ND6-Det-Q5U', 'ND6-LIPO2000-Q5U', 'ND6-LTX-12-Q5U', 'untreat-Q5U']), axis=1)
df_mut_ratio_mean.loc[flt4, 'belongto'] = 'ND4'
df_mut_ratio_mean.loc[flt5_1, 'belongto'] = 'ND5.1'
df_mut_ratio_mean.loc[flt6, 'belongto'] = 'ND6'
flt4 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd4) & (x['treatment'] in ['ND4-Det-Q5U', 'ND4-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt5_1 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd5_1) & (x['treatment'] in ['ND5-1-Det-Q5U', 'ND5-1-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt6 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd6) & (x['treatment'] in ['ND6-Det-Q5U', 'ND6-LIPO2000-Q5U', 'ND6-LTX-12-Q5U', 'untreat-Q5U']), axis=1)
df_mut_ratio_mean.loc[flt4, 'belongto'] = 'ND4'
df_mut_ratio_mean.loc[flt5_1, 'belongto'] = 'ND5.1'
df_mut_ratio_mean.loc[flt6, 'belongto'] = 'ND6'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND1-Det-Q5U', 'belongto'] = 'ND1'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND4-L1333N-Det-Q5U', 'belongto'] = 'ND4-L1333N'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND4-L1397C-Det-Q5U', 'belongto'] = 'ND4-L1397C'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND5-1-L1333N-Det-Q5U', 'belongto'] = 'ND5-1-L1333N'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND5-3-L1397C-Det-Q5U', 'belongto'] = 'ND5-3-L1397C'
# %% map mpmat_index
df_index_m = pd.read_excel("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/20220224_TargetSeq_IDs/20220307_name_match_target-seq_list-True-mpmat-index.xlsx")
# %% map V4 list name
# ND4 onlys
df_info = df_mut_ratio.copy()
df_name4 = pd.read_excel("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/20220224_TargetSeq_IDs/20220312-DdCBE-off_target_type.FinallistV4.CheckPrimer.AddV4ID.xlsx")
df_out_info = df_name4[['region_id', 'off_target_id.V4.ND4', 'off_target_id.V4.ND5.1', 'off_target_id.V4.ND6']].copy()
df_out_info.columns = ['mpmat_index', 'ND4', 'ND5.1', 'ND6']
df_out = df_info[df_info['region_id'].map(lambda x: 'on-target' not in x)].copy()
df_out_final = df_out.merge(df_index_m, how='left').merge(df_out_info, how='left')
df_out_final.groupby(['belongto', 'treatment', 'rep']).describe()
df_out_final[(df_out_final['ND5.1'].isna()) & (df_out_final.belongto == "ND5.1")].region_id.value_counts().index.tolist()
# # temporary fix
df_out_final.loc[
((df_out_final['ND4'].isna()) & (df_out_final.belongto == "ND4")
| df_out_final.loc[df_out_final.treatment == 'untreat-Q5U', 'ND4'].isna()), 'ND4'] = df_out_final.loc[(df_out_final['ND4'].isna()) & (df_out_final.belongto == "ND4"), 'mpmat_index']
df_out_final.loc[
((df_out_final['ND5.1'].isna()) & (df_out_final.belongto == "ND5.1")
| df_out_final.loc[df_out_final.treatment == 'untreat-Q5U', 'ND5.1'].isna()), 'ND5.1'] = df_out_final.loc[(df_out_final['ND5.1'].isna()) & (df_out_final.belongto == "ND5.1"), 'mpmat_index']
# ls_noname4 = ['ND5.1-only-2',
# 'ND5.1-only-4',
# 'ND5.1-only-8',
# 'ND5.1-only-10',
# 'ND5.1-new-only24',
# 'ND5.1-new-only17',
# 'ND5.1-new-only21']
# df_index_m[df_index_m.region_id.map(lambda x: x in ls_noname4)]
df_out_final.groupby(['belongto', 'treatment', 'rep']).describe()
# %% export tables for GraphPad
# %%% export the data comparing transfection conditions
# df_out_final[(df_out_final.region_id=='ND516-share-10') & (df_out_final.treatment=='untreat-Q5U')]
# df_out_final[(df_out_final.region_id=='ND516-share-12') & (df_out_final.treatment=='untreat-Q5U')]
# old "only" sites and old "share" sites
sort_for_table = False # True for fig False for table
df_trans_4 = df_out_final[
df_out_final.region_id.map(lambda x: ("ND4" in x) and ("new" not in x) and ("on-target" not in x)) # ND4 old only
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("new" not in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND4" in x) | ("untreat-Q5U" in x))) # ND4 old share
].copy()
df_trans_4 = pd.pivot_table(
data=df_trans_4,
index=["region_id", 'ND4'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_trans_4["sort_key"] = df_trans_4[
[
("mut_ratio", "ND4-Det-Q5U", "rep1"),
("mut_ratio", "ND4-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_trans_4.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_trans_4.drop(columns="sort_key", inplace=True)
df_trans_4
# old "only" sites and old "share" sites
df_trans_5 = df_out_final[
df_out_final.region_id.map(lambda x: ("ND5.1" in x) and ("new" not in x) and ("on-target" not in x)) # ND5.1 old only
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("new" not in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND5-1" in x) | ("untreat-Q5U" in x))) # ND5.1 old share
].copy()
df_trans_5 = pd.pivot_table(
data=df_trans_5,
index=["region_id", 'ND5.1'],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_trans_5["sort_key"] = df_trans_5[
[
("mut_ratio", "ND5-1-Det-Q5U", "rep1"),
("mut_ratio", "ND5-1-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_trans_5.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_trans_5.drop(columns="sort_key", inplace=True)
df_trans_5
# old "only" sites and old "share" sites
df_trans_6 = df_out_final[
df_out_final.region_id.map(lambda x: ("ND6" in x) and ("new" not in x) and ("on-target" not in x)) # ND6 old only
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("new" not in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND6" in x) | ("untreat-Q5U" in x))) # ND6 old share
].copy()
df_trans_6 = pd.pivot_table(
data=df_trans_6,
index=["region_id", 'ND6'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_trans_6["sort_key"] = df_trans_6[
[
("mut_ratio", "ND6-Det-Q5U", "rep1"),
("mut_ratio", "ND6-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_trans_6.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_trans_6.drop(columns="sort_key", inplace=True)
df_trans_6
# %%% export the validation data for Detect-seq's own candidate sites
# old and new "only" sites, old and new "share" sites
df_comp_4 = df_out_final[
(
df_out_final.region_id.map(lambda x: ("ND4" in x) and ("on-target" not in x)) # only的old new
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND4" in x) | ("untreat-Q5U" in x))) # share 的old new
)
& df_out_final.treatment.map(lambda x: 'LIPO2000' not in x) # 排除lipo2000
].copy()
df_comp_4 = pd.pivot_table(
data=df_comp_4,
index=["region_id", 'ND4'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_comp_4["sort_key"] = df_comp_4[
[
("mut_ratio", "ND4-Det-Q5U", "rep1"),
("mut_ratio", "ND4-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_comp_4.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_comp_4.drop(columns="sort_key", inplace=True)
df_comp_4
# old and new "only" sites, old and new "share" sites
df_comp_5 = df_out_final[
(
df_out_final.region_id.map(lambda x: ("ND5.1" in x) and ("on-target" not in x)) # only的old new
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND5-1" in x) | ("untreat-Q5U" in x))) # share 的old new
)
& df_out_final.treatment.map(lambda x: 'LIPO2000' not in x) # 排除lipo2000
].copy()
df_comp_5 = pd.pivot_table(
data=df_comp_5,
index=["region_id", 'ND5.1'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_comp_5["sort_key"] = df_comp_5[
[
("mut_ratio", "ND5-1-Det-Q5U", "rep1"),
("mut_ratio", "ND5-1-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_comp_5.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_comp_5.drop(columns="sort_key", inplace=True)
df_comp_5
# old and new "only" sites, old and new "share" sites
df_comp_6 = df_out_final[
(
df_out_final.region_id.map(lambda x: ("ND6" in x) and ("on-target" not in x)) # only的old new
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND6" in x) | ("untreat-Q5U" in x))) # share 的old new
)
& df_out_final.treatment.map(lambda x: ('LIPO2000' not in x) and ('LTX-12' not in x)) # 排除lipo2000 和 ltx 12
].copy()
df_comp_6 = pd.pivot_table(
data=df_comp_6,
index=["region_id", 'ND6'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_comp_6["sort_key"] = df_comp_6[
[
("mut_ratio", "ND6-Det-Q5U", "rep1"),
("mut_ratio", "ND6-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_comp_6.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_comp_6.drop(columns="sort_key", inplace=True)
df_comp_6
# %% save the trans and comp tables, v1 20220308
os.chdir("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/Manuscript/20220311_My_tables")
df_trans_4 = df_trans_4.fillna(0)
df_comp_4 = df_comp_4.fillna(0)
# df_trans_4.index = df_trans_4.index.map(lambda x: '_'.join(x))
# df_trans_5.index = df_trans_5.index.map(lambda x: '_'.join(x))
# df_trans_6.index = df_trans_6.index.map(lambda x: '_'.join(x))
# df_comp_4.index = df_comp_4.index.map(lambda x: '_'.join(x))
# df_comp_5.index = df_comp_5.index.map(lambda x: '_'.join(x))
# df_comp_6.index = df_comp_6.index.map(lambda x: '_'.join(x))
with pd.ExcelWriter('20220308_TargetSeqInfoForBarPlot.xlsx') as writer: # doctest: +SKIP
df_trans_4.to_excel(writer, sheet_name='ND4_TRANS')
df_trans_5.to_excel(writer, sheet_name='ND5.1_TRANS')
df_trans_6.to_excel(writer, sheet_name='ND6_TRANS')
df_comp_4.to_excel(writer, sheet_name='ND4_COMP')
df_comp_5.to_excel(writer, sheet_name='ND5.1_COMP')
df_comp_6.to_excel(writer, sheet_name='ND6_COMP')
for df_final in [df_trans_4, df_trans_5, df_trans_6, df_comp_4, df_comp_5, df_comp_6]:
for col in df_final:
df_final[col] = df_final[col].map(lambda x: x if x >= 0.001 else 0.001)
with pd.ExcelWriter('20220308_TargetSeqInfoForBarPlot_fixmin.xlsx') as writer: # doctest: +SKIP
df_trans_4.to_excel(writer, sheet_name='ND4_TRANS')
df_trans_5.to_excel(writer, sheet_name='ND5.1_TRANS')
df_trans_6.to_excel(writer, sheet_name='ND6_TRANS')
df_comp_4.to_excel(writer, sheet_name='ND4_COMP')
df_comp_5.to_excel(writer, sheet_name='ND5.1_COMP')
df_comp_6.to_excel(writer, sheet_name='ND6_COMP')
# %% save the trans and comp tables, v2 with sequence info added, 20220311
# NOTE: for this step, set the sort_for_table variable above to False and rerun that section first!!!!!
os.chdir("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/Manuscript/20220311_My_tables")
df_trans_4_for_tab = df_trans_4.copy()
df_trans_5_for_tab = df_trans_5.copy()
df_trans_6_for_tab = df_trans_6.copy()
df_comp_4_for_tab = df_comp_4.copy()
df_comp_5_for_tab = df_comp_5.copy()
df_comp_6_for_tab = df_comp_6.copy()
# %% next: organize the sequence information
# %% load relative index
# %% load seq info
ls_fa = sorted(glob.glob('reference.fasta/*.fa'))
ls_fa_context = []
for i in ls_fa:
with open(i, 'r') as f:
cont = f.readlines()
cont = [i.strip() for i in cont]
ls_fa_context.append(
[cont[0][1:], cont[1]]
)
df_seq = pd.DataFrame(ls_fa_context, columns=['region_id', 'seq'])
df_seq2 = df_seq.merge(df_idx)
# df_seq2 = df_seq.merge(df_idx, how='left')
# df_seq2[df_seq2.ref_base.isna()].region_id
# Out[101]:
# 0 ND1-on-target
# 17 ND4-only-11
# 62 ND5.3-on-target
# 63 ND516-share-1
# Name: region_id, dtype: object
def find_seq(x):
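    # relative_pos is 1-based, so it is shifted to a 0-based string index; the
    # function returns the 10 bp upstream, the edited base in brackets, and the
    # 10 bp downstream, e.g. "ACGTACGTAC[G]TACGTACGT".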
# print(x)
idx = x['relative_pos'] - 1
seq = x['seq']
fwd = seq[idx - 10: idx]
on = seq[idx]
lat = seq[idx + 1: idx + 11]
return f"{fwd}[{on}]{lat}"
df_seq2['seq'] = df_seq2.apply(find_seq, axis=1)
df_seq2
# %% merge seq
dt_seq = {
'seq_trans4': df_trans_4_for_tab,
'seq_trans5': df_trans_5_for_tab,
'seq_trans6': df_trans_6_for_tab,
'seq_comp4': df_comp_4_for_tab,
'seq_comp5': df_comp_5_for_tab,
'seq_comp6': df_comp_6_for_tab,
}
with | pd.ExcelWriter('20220311_TargetSeqInfoForBarPlot_seqinfos.xlsx') | pandas.ExcelWriter |
#coding=utf-8
import os
import CSZLData
import CSZLFeatureEngineering as FE
import CSZLModel
import CSZLDisplay
import CSZLUtils
import pandas as pd
import datetime
import time
class CSZLWorkflow(object):
"""各种workflow 主要就是back testing"""
def BackTesting(self):
#Default_folder_path='./temp/'
Default_folder_path='D:/temp2/'
#zzzz=CSZLData.CSZLData("20220101","20220301")
#zzzz.getDataSet_all(Default_folder_path)
#"20150801","20220425"
        dayA='20130101'#normal/small
dayB='20170301'
#dayB='20200101'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC='20170301'
dayD='20220425'
        dayA='20150801'#normal/small
dayB='20220425'
dayC='20220201'
dayD='20220513'
#dayA='20190101'#nomal/small
#dayB='20190601'
#dayC='20210101'
#dayD='20220425'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE05()
#zzzz=FE.CSZLFeatureEngineering("20170301","20220301",Default_folder_path)
#testpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE05()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220408",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20190301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20210101",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220401",Default_folder_path)
#testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(testpath,cur_model_path)
#cur_model_path2=cur_model.LGBmodelretrain(trainpath,resultpath2)
#resultpath3=cur_model.LGBmodelrepredict(testpath,resultpath,cur_model_path2)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
#lastday=today_df['trade_date'].max()
#today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
#copy_df=today_df[today_df['trade_date']==lastday]
#copy_df.to_csv("Today_NEXT_predict.csv")
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting_static_0501(self):
        # generate the required data sets
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
        dayA='20150801'#normal/small
dayB='20220425'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC=Day_start
dayD=Day_now
#dayD='20220506'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
cur_model.LGBmodelpredict(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting_static_0515(self):
        # generate the required data sets
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
        dayA='20150801'#normal/small
dayB='20220425'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC=Day_start
dayD=Day_now
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE05()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE05()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
cur_model.LGBmodelpredict(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting2(self):
#Default_folder_path='./temp/'
Default_folder_path='D:/temp2/'
#zzzz=CSZLData.CSZLData("20220101","20220301")
#zzzz.getDataSet_all(Default_folder_path)
zzzz=FE.CSZLFeatureEngineering("20130101","20170301",Default_folder_path)
trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering("20170301","20220301",Default_folder_path)
testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220408",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20190301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20200301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20210101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(testpath,cur_model_path)
cur_model_path2=cur_model.LGBmodelretrain(trainpath,resultpath2)
resultpath3=cur_model.LGBmodelrepredict(testpath,resultpath,cur_model_path2)
#resultpath=cur_model.MixOutputresult(testpath,cur_model_path)
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen(resultpath3)
pass
def RealTimePredict(self):
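        # Score today's data with a pre-trained LGB model: optionally rebuild the
        # features/model (the `if False:` block), pull roughly the last two months
        # of quotes, build today's joined feature file, then predict and mix output.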
Default_folder_path='./temp2/'
#Default_folder_path='D:/temp2/'
#cur_model_path="D:/temp2/FE0320190101to20210101_0/LGBmodeltrainLGBmodel_003"
#cur_model_path="D:/temp2/FE0320150801to20220425_0/LGBmodeltrainLGBmodel_003"
cur_model_path="./temp2/FE0520150801to20220425_0/LGBmodeltrainLGBmodel_003"
        # whether to regenerate the training features and model
if False:
#zzzz=FE.CSZLFeatureEngineering("20190101","20210101",Default_folder_path)
#trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering("20150801","20220425",Default_folder_path)
trainpath=zzzz.FE05()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
        # generate the required data sets
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
CSZLData.CSZLDataWithoutDate.get_realtime_quotes(Default_folder_path,Day_start,Day_end)
zzzz=FE.CSZLFeatureEngineering(Day_start,Day_end,Default_folder_path)
#zzzz=FE.CSZLFeatureEngineering("20220301","20220420",Default_folder_path)
#trainpath=zzzz.FE03()
#bbbb=pd.read_pickle(trainpath)
#aaaa=bbbb.head(10)
#aaaa=aaaa.to_csv("tttt.csv")
zzzz.FE05_real(int(Day_now))
featurepath="Today_Joinfeature.csv"
cur_model=CSZLModel.CSZLModel()
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(featurepath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(featurepath,cur_model_path,resultpath)
pass
def RealTimePredict_CB(self):
Default_folder_path='./temp2/'
#Default_folder_path='D:/temp2/'
#cur_model_path="D:/temp2/FE0320190101to20210101_0/LGBmodeltrainLGBmodel_003"
#cur_model_path="D:/temp2/FE0320150801to20220425_0/LGBmodeltrainLGBmodel_003"
cur_model_path="./temp2/FECB0320130101to20220501_0/LGBmodeltrain_CBLGBmodel_003"
        # whether to regenerate the training features and model
if False:
            dayA='20130101'#normal/small
dayB='20220501'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB02()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
        # generate the required data sets
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
CSZLData.CSZLDataWithoutDate.get_realtime_quotes_CB(Default_folder_path,Day_start,Day_end)
zzzz=FE.CSZLFeatureEngineering(Day_start,Day_end,Default_folder_path)
zzzz.FECB03_real(int(Day_now))
featurepath="Today_Joinfeature_CB.csv"
cur_model=CSZLModel.CSZLModel()
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict_CB(featurepath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(featurepath,cur_model_path,resultpath)
pass
def CBBackTesting(self):
Default_folder_path='D:/temp2/'
        dayA='20130101'#normal/small
dayB='20220501'
dayC='20220301'
dayD='20220505'
#dayD='20220506'
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen_CB(resultpath)
pass
def CBBackTesting_static_0508(self):
        # generate the required data sets
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
        dayA='20130101'#normal/small
dayB='20220301'
dayC=Day_start
dayD=Day_now
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB02()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB02()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict_CB.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen_CB(resultpath)
pass
def CBBackTesting_static_0515(self):
        # generate the required data sets
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
        dayA='20130101'#normal/small
dayB='20220501'
dayC=Day_start
dayD=Day_now
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
today_df = | pd.read_csv(resultpath,index_col=0,header=0) | pandas.read_csv |
import click
import os
import csv
import re
import functools
import pandas as pd
import numpy as np
import datetime
import common
import shutil
class InvalidSubscenario(Exception):pass
class CSVLocation(object):
"""Documentation for CSVLocation
class which acts as wrapper over folder, csv_location
"""
def __init__(self, csv_location):
"""
        param csv_location - a path where all csvs are stored in gridpath format
"""
self.csv_location = csv_location
def get_scenarios_csv(self):
return os.path.join(self.csv_location, "scenarios.csv")
def get_csv_data_master(self):
return os.path.join(self.csv_location, "csv_data_master.csv")
class Scenario(CSVLocation):
"""Documentation for Scenario
it stores all subscenarios in given scenario
"""
def __init__(self, csv_location, scenario_name):
super().__init__(csv_location)
scenarios_csv = self.get_scenarios_csv()
self.scenario_name = scenario_name
self.subscenarios = {}
with open(scenarios_csv) as f:
csvf = csv.DictReader(f)
for row in csvf:
subscenario_name = row['optional_feature_or_subscenarios']
subscenario_id = row[scenario_name]
if subscenario_id.strip()!="":
self.subscenarios[subscenario_name] = int(subscenario_id)
setattr(self, subscenario_name, int(subscenario_id))
def get_subscenarios(self):
return [Subscenario(name, v, self.csv_location) for name, v in self.subscenarios.items()]
def get_subscenario(self, name):
if name in self.subscenarios:
return Subscenario(name, self.subscenarios[name], self.csv_location)
else:
raise KeyError(f"Scenario {self.scenario_name} does not have subscenario {name}")
def __str__(self):
return f"Senario<{self.scenario_name}>"
def __repr__(self):
return str(self)
def test_scenario_class():
rpo30 = Scenario("/home/vikrant/programming/work/publicgit/gridpath/db/csvs_mh", "rpo30")
assert rpo30.scenario_name == "rpo30"
assert rpo30.csv_location == "/home/vikrant/programming/work/publicgit/gridpath/db/csvs_mh"
assert rpo30.temporal_scenario_id == 5
assert rpo30.load_zone_scenario_id == 1
assert rpo30.load_scenario_id == 1
assert rpo30.project_portfolio_scenario_id == 1
assert rpo30.project_operational_chars_scenario_id == 3
assert rpo30.project_availability_scenario_id == 3
assert rpo30.project_load_zone_scenario_id == 1
assert rpo30.project_specified_capacity_scenario_id == 1
assert rpo30.project_specified_fixed_cost_scenario_id == 1
assert rpo30.solver_options_id == 1
assert rpo30.temporal_scenario_id == 5
class Subscenario(CSVLocation):
"""Documentation for Scenario
"""
def __init__(self, name, id_, csv_location):
super().__init__(csv_location)
self.name = name
self.id_ = id_
try:
self.__find_files()
except Exception as e:
print(e)
print("Creating empty Subscenario")
@functools.lru_cache(maxsize=None)
def __getattr__(self, name):
files = [os.path.basename(f) for f in self.files]
attrs = [".".join(f.split(".")[:-1]) for f in files]
if name in attrs:
file = [f for f in self.get_files() if f.endswith(f"{name}.csv")][0]
return pd.read_csv(file)
elif name == "data":
file = [f for f in self.get_files()][0]
return pd.read_csv(file)
def get_name(self):
return self.name
    def get_id(self):
        return self.id_
def __str__(self):
return f"{self.name}<{self.id_}>"
def __repr__(self):
return str(self)
def get_folder(self):
master = self.get_csv_data_master()
return self.get_subscenario_folder(self.get_csv_data_master(),
self.name,
self.csv_location)
@staticmethod
def get_subscenario_folder(master, name, csv_location):
with open(master) as f:
csvf = csv.DictReader(f)
folder = [row['path'] for row in csvf if row['subscenario']==name][0]
return os.path.join(csv_location, folder)
def __find_files(self):
master = self.get_csv_data_master()
p = re.compile(f"{self.id_}_.*")
p_ = re.compile(f".*-{self.id_}.*.csv")
with open(master) as f:
csvf = csv.DictReader(f)
rows = [row for row in csvf if row['subscenario']==self.name]
sub_types = [row['subscenario_type'] for row in rows]
project_input = [row['project_input'] for row in rows]
filenames = [r['filename'] for r in rows if r['filename']]
files = []
if "dir_subsc_only" in sub_types:
self.sub_type = "dir_subsc_only"
subfolders = [f for f in os.listdir(self.get_folder()) if p.match(f)]
path = os.path.join(self.get_folder(), subfolders[0])
files = [os.path.join(path, f) for f in filenames]
self.files = files
elif "simple" in sub_types:
self.sub_type = "simple"
path = self.get_folder()
if '1' in project_input:
p = p_
files = [os.path.join(path, f) for f in os.listdir(self.get_folder()) if p.match(f)]
else:
raise InvalidSubscenario(f"Invalid subscenario {self.name}")
self.files = files
def get_files(self):
return self.files
def writedata(self, subfolder, **kwargs):##FIXME
def checkexisting(folder):
return os.path.exists(folder) and self.files
def writefile(**data):
for name, value in data.items():
path = os.path.join(scid_folder,name+".csv")
if os.path.exists(path):
print(f"File {name}.csv exists, skipping!")
else:
value.to_csv(path, index=False, date_format='%d-%m-%Y %H:%M')
if subfolder:
folder = self.get_folder()
scid_folder = os.path.join(folder, subfolder)
else:
scid_folder = self.get_folder()
try:
os.makedirs(scid_folder)
except Exception as e:
print(e)
print("Not creating folder, probably it exists")
for k, v in kwargs.items():
writefile(**{k:v})
self.__find_files()
def mergedata(self, merge_on:str, **kwargs):
"""
        Only for those subscenarios for which data merging is possible:
        for example, for exogenous_availability_scenario_id, data for
        different temporal settings can be stored in the same file.
"""
scid_folder = self.get_folder()
try:
os.makedirs(scid_folder)
except Exception as e:
print(e)
print("Not creating folder, probably it exists")
for name, value in kwargs.items():
path = os.path.join(scid_folder, name + ".csv")
if os.path.exists(path):
df = pd.read_csv(path)
df = df.merge(value, on=merge_on)
else:
df = value
df.to_csv(path, index=False)
BALANCING_TYPE = {"year":1,
"month":12,
"day":365}
#"hour":365*24,
#"15min":365*96}
GRAN = {1:"yearly",
12:"monthly",
365:"daily",
365*24:"hourly",
365*96:"15min"}
def test_temporal_scenario_id_class():
tmp5 = Subscenario(5, "/home/vikrant/programming/work/publicgit/gridpath/db/csvs_mh")
s = tmp5.structure
assert len(s.timepoint[s.spinup_or_lookahead==0])==365*96
def remove_subcenario(s):
for f in s.get_files():
os.remove(f)
folder = os.path.dirname(s.get_files()[0])
os.removedirs(folder)
def test_create_temporal_subscenario():
t1 = Subscenario('temporal_scenario_id', 1, CSV_LOCATION)
t75 = create_temporal_subscenario(t1, "month", 'daily', 75)
assert len(t75.structure)==365
assert len(t75.horizon_timepoints)==12
t76 = create_temporal_subscenario(t1, "year", 'daily', 76)
assert len(t76.structure)==365
assert len(t76.horizon_timepoints)==1
remove_subcenario(t75)
remove_subcenario(t76)
def create_temporal_subscenario(base:Subscenario,
balancing_type_horizon:str,
granularity:str,
id_:int):
structure,horizon_params,horizon_timepoints, period_params = create_temporal_subscenario_data(base, balancing_type_horizon, granularity, id_)
steps = len(structure['subproblem_id'].unique())
granularity = len(structure[structure.spinup_or_lookahead==0])
d = GRAN
gran = d[granularity]
subfolder = f"{id_}_{steps}steps_{gran}_timepoints"
tscid = Subscenario(name='temporal_scenario_id',
id_=id_,
csv_location=base.csv_location)
tscid.writedata(subfolder,
description=pd.DataFrame({f"{steps} steps":[],
f"{gran} timepoints":[]}),
structure=structure,
horizon_params=horizon_params,
horizon_timepoints=horizon_timepoints,
period_params=period_params)
return tscid
def write_endo_project_file(filename, data, headers):
    if os.path.exists(filename):
        print(f"File {filename} exists, skipping overwrite.")
        return
with open(filename, "w") as f:
csvf = csv.DictWriter(f, headers)
csvf.writerow(data)
def get_project_filename(project, subscenario_id, subscenario_name):
return f"{project}-{subscenario_id}-{subscenario_name}.csv"
def create_availability_subscenario(csv_location:str,
availability:str,
endogenous:str,
description:str,
id_:int
):
"""
    Build a project_availability_scenario_id subscenario: read the availability
    table, write per-project endogenous_availability_scenario_id files for the
    projects that define one, then write the availability table itself.
"""
pascid = Subscenario(name = 'project_availability_scenario_id',
id_= id_,
csv_location = csv_location
)
data = pd.read_csv(availability)
availdata = data[data.endogenous_availability_scenario_id.notnull()]
if endogenous:
agg_data = pd.read_csv(endogenous)
for project, endoscid_ in availdata[
['project','endogenous_availability_scenario_id']].drop_duplicates().values:
endoscid = Subscenario('endogenous_availability_scenario_id',
endoscid_,
csv_location)
print(endoscid_,"**"*10)
df = agg_data[(agg_data.subscenario_id == endoscid_) & (agg_data.project==project)]
cols = list(df.columns)
cols_ = ['project', 'subscenario_id', 'subscenario_name']
for c in cols_:
cols.remove(c)
projectdata = df[cols]
filename = common.get_subscenario_csvpath(project,
'endogenous_availability_scenario_id',
int(float(endoscid_)),
endoscid.csv_location,
"endo")
f = os.path.basename(filename.split(".")[0])
endoscid.writedata(None, **{f:projectdata})
pascid.writedata(None, **{f"{id_}_availability_{description}":data})
return pascid
def get_subset(temporal):
s = temporal.structure
s = s[s.spinup_or_lookahead==0]
s = s[['timepoint', 'timestamp']]
s['timestamp'] = pd.to_datetime(s.timestamp, format='%d-%m-%Y %H:%M')
s.sort_values('timestamp', inplace=True)
s.set_index('timestamp', inplace=True)
return s
def create_horizon_or_timepoint(base, size):
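    # Append a zero-padded counter to `base`, e.g. base=2030, size=12 gives
    # 203001, 203002, ..., 203012; a size of 1 just returns [base].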
pad = len(str(size+1))
if size==1:
return np.array([int(base)])
return np.array([int(str(base) + str(i).zfill(pad)) for i in range(1, size+1)])
def create_horizon(period, n):
return create_horizon_or_timepoint(period, n)
def create_timepoints(horizon, n):
return create_horizon_or_timepoint(horizon, n)
def create_horizon_params(base,
balancing_type_horizon,
structure,
):
grp = structure.groupby("subproblem_id")
period = base.period_params['period'].iloc[0]
hname = 'horizon_'+balancing_type_horizon
df = grp[[hname]].first().reset_index()
n = len(df)
subproblem_id = np.array(range(1,n+1))
balancing_type_horizon_ = np.array([balancing_type_horizon]*n)
boundary = np.array(["linear"]*n)
df['balancing_type_horizon'] = balancing_type_horizon
df['boundary'] = boundary
df.rename(columns={hname:'horizon'}, inplace=True)
return df[["subproblem_id",
"balancing_type_horizon",
"horizon",
"boundary"]]
def create_period_params(base):
return base.period_params.copy()
def get_delta(granularity):
if granularity=="15min":
return datetime.timedelta(minutes=15)
elif granularity=="hourly":
return datetime.timedelta(hours=1)
elif granularity=="daily":
return datetime.timedelta(days=1)
def get_subproblem_id_index(balancing_type_horizon, s1):
"""
s1 is dataframe with columns m, d, period
"""
if balancing_type_horizon == 'month':
index = pd.Index(data=s1['m'].unique(), name='m')
elif balancing_type_horizon == 'year':
index = pd.Index(data=s1['period'].unique(), name='period')
elif balancing_type_horizon == 'day':
index = pd.MultiIndex.from_arrays([s1['d'], s1['m']], names=['d','m']).unique()
else:
raise ValueError(f"Incompatible balancing_type_horizon {balancing_type_horizon}")
return index
def get_subproblem_id(index):
subproblem_id = pd.Series(data=range(1, len(index)+1), index=index, name='subproblem_id')
return subproblem_id
def get_groupby_cols(granularity):
common = ['stage_id', 'period']
if granularity == "daily":
grpbycols = ['d', 'm']
elif granularity == "monthly":
grpbycols = ['m']
elif granularity == "yearly":
grpbycols = []
elif granularity == "hourly" :
grpbycols = ['d','m','H']
elif granularity == "15min" :
grpbycols = ['d','m','H','M']
else:
raise ValueError(f"Incompatible granularity {granularity}")
grpbycols.extend(common)
return grpbycols
def split_timestamp(s):
ts = s.timestamp.str.split("-", expand=True)
ts.columns = ['d', 'm', 'ys']
ts1 = ts.ys.str.split(expand=True)
del ts['ys']
ts['y'] = ts1[0]
ts2 = ts1[1].str.split(":", expand=True)
ts['H'] = ts2[0]
ts['M'] = ts2[1]
return ts
def get_groupsby_structure(base:Subscenario, granularity):
structure = base.structure
s = structure[structure.spinup_or_lookahead==0]
ts = split_timestamp(s)
return s[['timepoint', 'timepoint_weight', 'timestamp']].join(ts)
def subset_data(data, temporal:Subscenario):
s = temporal.structure
s = s[s.spinup_or_lookahead==0]
t = s[['timepoint']]
return t.merge(data, on='timepoint')[data.columns]
def collapse(data,
columns,
basetemporal:Subscenario,
granularity,
subtemporal:Subscenario,
weighted=False,
operation='sum'):
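    # Aggregate `columns` from the base temporal resolution down to `granularity`:
    # either a timepoint-weight-weighted mean (weighted=True) or the named groupby
    # operation (e.g. 'sum'), then map the result onto subtemporal's timepoints
    # via the shared timestamps.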
ts = get_groupsby_structure(basetemporal, granularity)
col_names = [c for c in data.columns]
grpbycols = get_groupby_cols(granularity)
grpbycols.remove('period')
grpbycols.remove('stage_id')
data = data.merge(ts, on='timepoint')
if weighted:
for c in columns:
data[c] = data[c]*data.timepoint_weight
grp = data.groupby(grpbycols)
opgrp = grp[columns]
weight = grp['timepoint_weight']
r = opgrp.sum()
for c in columns:
r[c] = r[c]/weight.sum()
else:
grp = data.groupby(grpbycols)
opgrp = grp[columns]
op = getattr(opgrp, operation)
r = op()
timestamp = grp[['timestamp']+[c for c in col_names if c not in columns and c!='timepoint']].first()
r = r.join(timestamp)
s = subtemporal.structure
s = s[s.spinup_or_lookahead==0][['timepoint','timestamp']]
final_r = r.merge(s, on='timestamp')
final_r = final_r.sort_values(by="timepoint").reset_index()
return final_r[col_names]
def create_structure(base:Subscenario,
balancing_type_horizon:str,
granularity:str):
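    # Collapse the base structure (spinup/lookahead rows excluded) to the requested
    # granularity, then attach subproblem ids and horizons derived from
    # balancing_type_horizon (year / month / day).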
ns = [key for key, value in GRAN.items() if value == granularity]
if not ns:
raise ValueError("Invalid granularity specified. valid granularity values are {}".format(GRAN.values()))
structure = base.structure
s = structure[structure.spinup_or_lookahead==0]
ts = split_timestamp(s)
s1 = s.join(ts)
grpbycols = get_groupby_cols(granularity)
fcols = ['timepoint_weight', 'previous_stage_timepoint_map','spinup_or_lookahead','linked_timepoint', 'month', 'hour_of_day', 'timestamp']
scols = ['number_of_hours_in_timepoint']
grp = s1.groupby(grpbycols)
firstcols = grp[fcols].first()
sumcols = grp[scols].sum()
#sumcols = sumcols.astype(int)
index = get_subproblem_id_index(balancing_type_horizon, s1)
subproblem_id = get_subproblem_id(index)
horizon = pd.Series(data=create_horizon(base.period_params['period'].iloc[0], len(index)),
index = index,
name = "horizon_" + balancing_type_horizon)
s_ = firstcols.join(sumcols).join(subproblem_id).join(horizon)
s_ = create_timepoint_col(s_, horizon)
    s_['linked_timepoint'] = np.nan
    colnames = ['subproblem_id', 'stage_id', 'timepoint', 'period',
                'number_of_hours_in_timepoint', 'timepoint_weight',
                'previous_stage_timepoint_map', 'spinup_or_lookahead',
                'linked_timepoint', 'month', 'hour_of_day', 'timestamp',
                horizon.name]
return s_[colnames]
def create_timepoint_col(s_, horizon):
"""
s_ is dataframe containing columns with name timestamp in '%d-%m-%Y %H:%M' format
make use
of these columns to make timepoint
"""
s_['timestamp_'] = | pd.to_datetime(s_.timestamp, format='%d-%m-%Y %H:%M') | pandas.to_datetime |
import xarray as _xr
import pathlib as _pl
import numpy as _np
# import cartopy.crs as ccrs
# import metpy
# from scipy import interpolate
# from datetime import datetime, timedelta
from mpl_toolkits.basemap import Basemap as _Basemap
from pyproj import Proj as _Proj
import urllib as _urllib
from pyquery import PyQuery as _pq
import pandas as _pd
import matplotlib.pyplot as _plt
import mpl_toolkits.basemap as _basemap
import os as _os
import numba as _numba
import multiprocessing as _mp
import functools as _functools
def open_file(p2f, verbose = False):
ds = _xr.open_dataset(p2f)
product_name = ds.attrs['dataset_name'].split('_')[1]
if verbose:
print(f'product name: {product_name}')
if product_name == 'ABI-L2-AODC-M6':
classinst = ABI_L2_AODC_M6(ds)
elif product_name[:-1] == 'ABI-L2-MCMIPC-M':
classinst = ABI_L2_MCMIPC_M6(ds)
elif product_name == 'ABI-L2-LSTC-M6':
classinst = ABI_L2_LSTC_M6(ds)
if verbose:
print(f'identified as: ABI-L2-LSTC-M6')
else:
classinst = GeosSatteliteProducts(ds)
if verbose:
print('not identified')
# assert(False), f'The product {product_name} is not known yet, programming required.'
return classinst
class ProjectionProject(object):
def __init__(self,sites,
# list_of_files = None,
# download_files = False,
# file_processing_state = 'raw',
path2folder_raw = '/mnt/telg/data/smoke_events/20200912_18_CO/goes_raw/ABI_L2_AODC_M6_G16/',
path2interfld = '/mnt/telg/tmp/class_tmp_inter',
path2resultfld = '/mnt/telg/data/smoke_events/20200912_18_CO/goes_projected/ABI_L2_AODC_M6_G16/',
generate_missing_folders = False):
self.sites = sites
self.list_of_files = None
self.download_files = False
self.path2folder_raw = _pl.Path(path2folder_raw)
self.path2interfld_point = _pl.Path(path2interfld).joinpath('point')
self.path2interfld_area = _pl.Path(path2interfld).joinpath('area')
self.path2resultfld_point = _pl.Path(path2resultfld).joinpath('point')
self.path2resultfld_area = _pl.Path(path2resultfld).joinpath('area')
outputfld = [self.path2interfld_point, self.path2interfld_area , self.path2resultfld_point, self.path2resultfld_area]
if generate_missing_folders:
for fld in outputfld:
try:
fld.mkdir(exist_ok=True)
except:
fld.parent.mkdir(exist_ok=True)
for fld in outputfld:
assert(fld.is_dir()), f'no such folder {fld.as_posix()}, set generate_missing_folders to true to generate folders'
self._workplan = None
@property
def workplan(self):
# list_of_files = None,
# download_files = False,
# file_processing_state = 'raw',
# path2folder_raw = '/mnt/telg/data/smoke_events/20200912_18_CO/goes_raw/ABI_L2_AODC_M6_G16/',
# path2interfld = '/mnt/telg/tmp/class_tmp_inter',
# path2resultfld = '/mnt/telg/data/smoke_events/20200912_18_CO/goes_projected/ABI_L2_AODC_M6_G16/'):
"""
Parameters
----------
list_of_files : TYPE, optional
            If this is None then the workplan for the concatenation is generated
(the files in the intermediate folder will be used). The default is
None.
file_location: str ('ftp', 'local'), optional
file_processing_state: str ('raw', 'intermediate'), optional
path2folder_raw : TYPE, optional
DESCRIPTION. The default is '/mnt/telg/tmp/class_tmp/'.
path2interfld : TYPE, optional
DESCRIPTION. The default is '/mnt/telg/tmp/class_tmp_inter'.
path2resultfld : TYPE, optional
DESCRIPTION. The default is '/mnt/telg/projects/GOES_R_ABI/data/sattelite_at_gml'.
Returns
-------
df : TYPE
DESCRIPTION.
"""
if isinstance(self._workplan, type(None)):
if not self.download_files:
# if file_processing_state == 'intermediate':
if 0:
pass
if isinstance(self.list_of_files, type(None)):
df = _pd.DataFrame(list(self.path2interfld.glob('*.nc')), columns=['path2intermediate_file'])
else:
assert(False), 'programming required'
# elif file_processing_state == 'raw':
elif 1:
if isinstance(self.list_of_files, type(None)):
df = _pd.DataFrame(list(self.path2folder_raw.glob('*.nc')), columns=['path2tempfile'])
else:
assert(False), 'programming required'
df['path2intermediate_file_point'] =df.apply(lambda row: self.path2interfld_point.joinpath(row.path2tempfile.name), axis = 1)
df['path2intermediate_file_area'] =df.apply(lambda row: self.path2interfld_area.joinpath(row.path2tempfile.name), axis = 1)
else:
assert(False), 'not an option'
elif self.download_files:
assert(False), 'this will probably not work'
df = | _pd.DataFrame(self.list_of_files, columns=['fname_on_ftp']) | pandas.DataFrame |
from flask import Flask, render_template, url_for, request,jsonify
import numpy as np
import pandas as pd
import json
import operator
import time
import random
import glob
#Initialize Flask App
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/rawdata')
def organizeDataToJson(date_time,value_array=None):
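    # Bin the timestamps (or their associated values, when value_array is given)
    # into per-day 'AM'/'PM' arrays of 12 hourly buckets and return them as JSON.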
dictionary = dict()
i = 0
for stamp in date_time:
#Create the key being the day
datestr = '{}-{}-{}'.format(stamp.year, stamp.month, stamp.day)
if datestr not in dictionary:
dictionary[datestr] = { 'AM' : [ 0 for x in range(12) ], 'PM' : [ 0 for x in range(12) ] }
if value_array is None:
if stamp.hour < 12:
dictionary[datestr]['AM'][stamp.hour] += 1
else:
dictionary[datestr]['PM'][stamp.hour - 12] += 1
else:
if stamp.hour < 12:
dictionary[datestr]['AM'][stamp.hour] += value_array[i]
else:
dictionary[datestr]['PM'][stamp.hour - 12] += value_array[i]
i+=1
return json.dumps(dictionary)
@app.route('/musicdata')
def createMusicDict():
d = dict()
dd = dict()
for stamp in music_data.Date:
#stamp = datetime.strftime("%Y-%m-%d %H:%M")
datestr = '{}-{}-{}'.format(stamp.year, stamp.month, stamp.day)
#print('{} {}:{}'.format(datestr, stamp.hour, stamp.minute))
if datestr not in d:
# create placeholders for am/pm
d[datestr] = { 'AM' : [ 0 for x in range(12) ], 'PM' : [ 0 for x in range(12) ] }
if stamp.hour < 12:
d[datestr]['AM'][stamp.hour] += 1
else:
d[datestr]['PM'][stamp.hour - 12] += 1
i = 0
j = 0
for day,value in d.items():
am_array = np.array(value['AM'])
am_average = np.average(am_array)
am_array[am_array==0] = int(am_average)
pm_array = np.array(value['PM'])
pm_average = np.average(pm_array)
pm_array[pm_array==0] = int(pm_average)
if i%6 == 0:
datestr = 'week_'+str(j)
if datestr not in dd:
dd[datestr] = { 'AM' : [ 0 for x in range(12) ], 'PM' : [ 0 for x in range(12) ] }
j+=1
#print(dd[datestr])
if am_average > 1:
dd[datestr]['AM'] = list(map(operator.add, dd[datestr]['AM'], value['AM']))
if pm_average > 1:
dd[datestr]['PM'] = list(map(operator.add, dd[datestr]['PM'], value['PM']))
i+=1
for week,value in dd.items():
#
am_array = np.array(value['AM'])
am_average = np.average(am_array)
am_array[am_array==0] = int(am_average)
#add the new array
value['AM'] = am_array.tolist()
#
pm_array = np.array(value['PM'])
pm_average = np.average(pm_array)
pm_array[pm_array==0] = int(pm_average)
#add the new array
value['PM'] = pm_array.tolist()
return json.dumps(dd)
@app.route('/createcsv', methods=['POST'])
def createcsv():
# POST request
if request.method == 'POST':
print('Saving Client data as CSV')
#since all the arrays are not the same length
jsonObj = request.get_json(force=True)
fileName = jsonObj['fileName']
answers = jsonObj['answers']
df = pd.DataFrame.from_dict(answers)
#path = '/Answers/'
path = '/images/Exp2/Correct-Answers-Experiment-2/'
df.to_csv(path+fileName+'.csv',sep=',')
#df.to_csv("data.csv")
return 'Data saved as csv file!', 200
'''Experiment 1'''
@app.route('/stackedbarchart')
def stackedbarchart():
return render_template('stackedbarchart.html',
humidityData=humidityData,
trafficData=trafficData,
energyData=energyData,
uberData=uberData)
@app.route('/adjacentbarchart')
def adjacentbarchart():
return render_template('adjacentbarchart.html',
humidityData=humidityData,
trafficData=trafficData,
energyData=energyData,
uberData=uberData)
@app.route('/combinedbarchart')
def combinedbarchart():
return render_template('combinedbarchart.html',
humidityData=humidityData,
trafficData=trafficData,
energyData=energyData,
uberData=uberData)
'''Experiment 2'''
@app.route('/overlaidbarchart')
def overlaidbarchart():
return render_template('overlaidbarchart.html',
humidityData=humidityData,
trafficData=trafficData,
energyData=energyData,
uberData=uberData)
@app.route('/layeredbarchart')
def layeredbarchart():
return render_template('layeredbarchart.html',
humidityData=humidityData,
trafficData=trafficData,
energyData=energyData,
uberData=uberData)
@app.route('/rosebarchart')
def rosebarchart():
return render_template('rosebarchart.html',
humidityData=humidityData,
trafficData=trafficData,
energyData=energyData,
uberData=uberData)
''' End '''
def createHumidityJson(data):
dictionary = dict()
data['date_time'] = pd.to_datetime(data['date_time'])
data['Phoenix'] = data['Phoenix'].round(0) #time consuming
i = 0
for stamp in data['date_time']:
#Create the key being the day
datestr = '{}-{}-{}'.format(stamp.year, stamp.month, stamp.day)
if datestr not in dictionary:
dictionary[datestr] = {
'AM' : [ 0 for x in range(12) ],
'PM' : [ 0 for x in range(12) ]
}
if stamp.hour < 12:
dictionary[datestr]['AM'][stamp.hour] += data['Phoenix'][i]
else:
dictionary[datestr]['PM'][stamp.hour - 12] += data['Phoenix'][i]
i+=1
#return json.dumps(dictionary)
df = pd.DataFrame.from_dict(dictionary)
path = '/static/data/cleanedJsonData/'
return df.to_json(path+'PhoenixHumidity.json')
def createUberPickupsJson(data):
dictionary = dict()
#time consuming
data.date_time = pd.to_datetime(data.date_time)
for stamp in data.date_time:
#Create the key being the day
datestr = '{}-{}-{}'.format(stamp.year, stamp.month, stamp.day)
if datestr not in dictionary:
dictionary[datestr] = {
'AM' : [ 0 for x in range(12) ],
'PM' : [ 0 for x in range(12) ]
}
if stamp.hour < 12:
dictionary[datestr]['AM'][stamp.hour] += 1
else:
dictionary[datestr]['PM'][stamp.hour - 12] += 1
return json.dumps(dictionary)
#df = pd.DataFrame.from_dict(dictionary)
#path = '/static/data/cleanedCsvData/'
#return df.to_json(path+'NYCUberPickups.json')
def createNYCTrafficFlowJson(data):
dictionary = dict()
for stamp in data['date']:
if stamp not in dictionary:
dictionary[stamp] = {
'AM' : [ 0 for x in range(12) ],
'PM' : [ 0 for x in range(12) ]
}
for index, row in data.iterrows():
dictionary[row['date']]['AM'] = [
row.am0,row.am1,row.am2,
row.am3,row.am4,row.am5,
row.am6,row.am7,row.am8,
row.am9,row.am10,row.am11]
dictionary[row['date']]['PM'] = [
row.pm0,row.pm1,row.pm2,
row.pm3,row.pm4,row.pm5,
row.pm6,row.pm7,row.pm8,
row.pm9,row.pm10,row.pm11]
return json.dumps(dictionary)
#df = pd.DataFrame.from_dict(dictionary)
#path = '/static/data/cleanedCsvData/'
#return df.to_json(path+'NYCTrafficFlow.json')
def createAmericanEnergyJson(data):
dictionary = dict()
data['date_time'] = | pd.to_datetime(data['date_time']) | pandas.to_datetime |
#!/usr/bin/env python
"""Tests for `arcos_py` package."""
from numpy import int64
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from arcos4py import ARCOS
from arcos4py.tools._errors import noDataError
@pytest.fixture
def no_bin_data():
"""
pytest fixture to generate test data
"""
data = [item for i in range(10) for item in list(range(1, 11))]
m = [0 for i in range(100)]
d = {'id': data, 'time': data, 'm': m, 'x': data}
print(d)
df = pd.DataFrame(d)
return df
def test_empty_data(no_bin_data: pd.DataFrame):
with pytest.raises(noDataError, match='Input is empty'):
test_data = no_bin_data[no_bin_data['m'] > 0]
pos = ['x']
ts = ARCOS(
test_data, posCols=pos, frame_column='time', id_column='id', measurement_column='m', clid_column='clTrackID'
)
ts.trackCollev(eps=1, minClsz=1, nPrev=2)
def test_1_central_1_prev():
df_in = pd.read_csv('tests/testdata/1central_in.csv')
df_true = pd.read_csv('tests/testdata/1central_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true, check_dtype=False)
def test_1_central_2_prev():
df_in = pd.read_csv('tests/testdata/1central_in.csv')
df_true = pd.read_csv('tests/testdata/1central2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true, check_dtype=False)
def test_1_central_3D():
df_in = pd.read_csv('tests/testdata/1central3D_in.csv')
df_true = pd.read_csv('tests/testdata/1central3D_res.csv')
pos = ['x', 'y', 'z']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x', 'y', 'z'])
assert_frame_equal(out, df_true)
def test_1_central_growing():
df_in = pd.read_csv('tests/testdata/1centralGrowing_in.csv')
df_true = pd.read_csv('tests/testdata/1centralGrowing_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_central_growing():
df_in = pd.read_csv('tests/testdata/2centralGrowing_in.csv')
df_true = pd.read_csv('tests/testdata/2centralGrowing_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_with_1_common_symmetric():
df_in = pd.read_csv('tests/testdata/2with1commonSym_in.csv')
df_true = pd.read_csv('tests/testdata/2with1commonSym_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_with_1_common_asymmetric():
df_in = pd.read_csv('tests/testdata/2with1commonAsym_in.csv')
df_true = pd.read_csv('tests/testdata/2with1commonAsym_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_3_spreading_1_prev():
df_in = pd.read_csv('tests/testdata/3spreading_in.csv')
df_true = pd.read_csv('tests/testdata/3spreading_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_3_spreading_2_prev():
df_in = pd.read_csv('tests/testdata/3spreading_in.csv')
df_true = pd.read_csv('tests/testdata/3spreading2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_5_overlapping_1_prev():
df_in = pd.read_csv('tests/testdata/5overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/5overlapping_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_5_overlapping_2_prev():
df_in = pd.read_csv('tests/testdata/5overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/5overlapping2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_6_overlapping():
df_in = pd.read_csv('tests/testdata/6overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/6overlapping_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
out['trackID'] = out['trackID'].astype(int64)
assert_frame_equal(out, df_true)
def test_split_from_single():
df_in = pd.read_csv('tests/testdata/1objSplit_in.csv')
df_true = pd.read_csv('tests/testdata/1objSplit_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
def test_split_from_2_objects():
df_in = pd.read_csv('tests/testdata/2objSplit_in.csv')
df_true = pd.read_csv('tests/testdata/2objSplit_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
def test_cross_2_objects():
df_in = pd.read_csv('tests/testdata/2objCross_in.csv')
df_true = pd.read_csv('tests/testdata/2objCross_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
| assert_frame_equal(out, df_true) | pandas.testing.assert_frame_equal |
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.index import Index, Int64Index
from pandas.tseries.frequencies import infer_freq, to_offset
from pandas.tseries.offsets import DateOffset, generate_range, Tick
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if isinstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, datetime):
func = getattr(self, opname)
result = func(_to_m8(other))
elif isinstance(other, np.ndarray):
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if isinstance(other, timedelta):
func = getattr(self, opname)
return func(np.timedelta64(other))
else:
func = getattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeSeriesError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_groupby = lib.groupby_arrays # _wrap_i8_function(lib.groupby_int64)
_arrmap = _wrap_dt_function(_algos.arrmap_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not isinstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if isinstance(freq, basestring):
freq = to_offset(freq)
else:
if isinstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, datetime):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.to_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', copy=copy)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', copy=copy)
else:
subarr = tools.to_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_get_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not isinstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not isinstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = | normalize_date(end) | pandas.tseries.tools.normalize_date |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 09:28:00 2020
@author: <NAME>
PCA heterogeneity plot
"""
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
import seaborn as sns; sns.set(color_codes=True)
def pca_het (adata, x_coordinate='X_position',y_coordinate='Y_position',
unique_id='ImageId',subset_image=None, raw_data=True,
phenotype='phenotype',phenotype_of_interest=None,
genes_of_interest=None,s = 2, alpha=0.8,fontsize=5,
widths = [2],heights = [6,1], save_fig=False, save_dir=None,
save_name = 'PCA_Heterogenity_Plot', save_format='png', figsize=(10, 10)):
# Copy adata
bdata = adata
if bdata.raw is None:
bdata.raw = bdata
if subset_image is not None:
bdata = bdata[bdata.obs[unique_id] == subset_image]
if phenotype_of_interest is not None:
bdata = bdata[bdata.obs[phenotype].isin(phenotype_of_interest)]
if genes_of_interest is not None:
bdata = bdata[:, genes_of_interest]
else:
genes_of_interest = list(bdata.var.index)
# Create a dataframe with the necessary information
if raw_data is True:
data = pd.DataFrame(np.log1p(bdata.raw[:, genes_of_interest].X), index=bdata.obs.index, columns=bdata.var.index)
else:
data = | pd.DataFrame(bdata[:, genes_of_interest].X, index=bdata.obs.index, columns=bdata.var.index) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
ROOT_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
DATA_DIR = os.path.join(ROOT_DIR, 'data')
RESULTS_DIR = os.path.join(ROOT_DIR, 'results')
FIGURES_DIR = os.path.join(ROOT_DIR, 'figures')
DOWNLOAD_DIR = os.path.join(DATA_DIR, 'download')
def keystrokes2events(df):
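    # Melt the wide per-keystroke layout (one column per '*time*' column) into a
    # long event table: one row per event, with a 'time' value and an 'action'
    # label naming the originating time column.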
time_cols = [c for c in df.columns if 'time' in c]
dfs = []
for col in time_cols:
new_df = df[df.columns.difference(time_cols)]
new_df['time'] = df[col]
new_df['action'] = col
dfs.append(new_df)
df = | pd.concat(dfs) | pandas.concat |
import datetime as dt
import numpy as np
import pathlib
import pandas as pd
from functools import partial
from .deprecations import deprecated_kwargs
from . import utils
from copy import deepcopy
from collections import OrderedDict
from collections.abc import Iterable
from openpyxl import load_workbook
from openpyxl.cell.cell import get_column_letter
from openpyxl.xml.functions import fromstring, QName
from openpyxl.utils import cell
from styleframe.container import Container
from styleframe.series import Series
from styleframe.styler import Styler, ColorScaleConditionalFormatRule
try:
pd_timestamp = pd.Timestamp
except AttributeError:
pd_timestamp = pd.tslib.Timestamp
class StyleFrame:
"""
    A wrapper class that wraps a :class:`pandas.DataFrame` object and represents a stylized dataframe.
Stores container objects that have values and styles that will be applied to excel
:param obj: Any object that pandas' dataframe can be initialized with: an existing dataframe, a dictionary,
a list of dictionaries or another StyleFrame.
:param styler_obj: Will be used as the default style of all cells.
:type styler_obj: :class:`.Styler`
"""
P_FACTOR = 1.3
A_FACTOR = 13
def __init__(self, obj, styler_obj=None):
from_another_styleframe = False
from_pandas_dataframe = False
if styler_obj and not isinstance(styler_obj, Styler):
raise TypeError('styler_obj must be {}, got {} instead.'.format(Styler.__name__, type(styler_obj).__name__))
if isinstance(obj, pd.DataFrame):
from_pandas_dataframe = True
if obj.empty:
self.data_df = deepcopy(obj)
else:
self.data_df = obj.applymap(lambda x: Container(x, deepcopy(styler_obj)) if not isinstance(x, Container) else x)
elif isinstance(obj, pd.Series):
self.data_df = obj.apply(lambda x: Container(x, deepcopy(styler_obj)) if not isinstance(x, Container) else x)
elif isinstance(obj, (dict, list)):
self.data_df = pd.DataFrame(obj).applymap(lambda x: Container(x, deepcopy(styler_obj)) if not isinstance(x, Container) else x)
elif isinstance(obj, StyleFrame):
self.data_df = deepcopy(obj.data_df)
from_another_styleframe = True
else:
raise TypeError("{} __init__ doesn't support {}".format(type(self).__name__, type(obj).__name__))
self.data_df.columns = [Container(col, deepcopy(styler_obj)) if not isinstance(col, Container) else deepcopy(col)
for col in self.data_df.columns]
self.data_df.index = [Container(index, deepcopy(styler_obj)) if not isinstance(index, Container) else deepcopy(index)
for index in self.data_df.index]
if from_pandas_dataframe:
self.data_df.index.name = obj.index.name
self._columns_width = obj._columns_width if from_another_styleframe else OrderedDict()
self._rows_height = obj._rows_height if from_another_styleframe else OrderedDict()
self._has_custom_headers_style = obj._has_custom_headers_style if from_another_styleframe else False
self._cond_formatting = []
self._default_style = styler_obj or Styler()
self._index_header_style = obj._index_header_style if from_another_styleframe else self._default_style
self._known_attrs = {'at': self.data_df.at,
'loc': self.data_df.loc,
'iloc': self.data_df.iloc,
'applymap': self.data_df.applymap,
'groupby': self.data_df.groupby,
'index': self.data_df.index,
'fillna': self.data_df.fillna}
def __str__(self):
return str(self.data_df)
def __len__(self):
return len(self.data_df)
def __getitem__(self, item):
if isinstance(item, pd.Series):
return self.data_df.__getitem__(item).index
if isinstance(item, list):
return StyleFrame(self.data_df.__getitem__(item))
return Series(self.data_df.__getitem__(item))
def __setitem__(self, key, value):
if isinstance(value, (Iterable, pd.Series)):
self.data_df.__setitem__(Container(key), list(map(Container, value)))
else:
self.data_df.__setitem__(Container(key), Container(value))
def __delitem__(self, item):
return self.data_df.__delitem__(item)
def __getattr__(self, attr):
if attr in self.data_df.columns:
return self.data_df[attr]
try:
return self._known_attrs[attr]
except KeyError:
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
@property
def columns(self):
return self.data_df.columns
@columns.setter
def columns(self, columns):
self.data_df.columns = [col if isinstance(col, Container) else Container(value=col)
for col in columns]
def _get_column_as_letter(self, sheet, column_to_convert, startcol=0):
col = column_to_convert.value if isinstance(column_to_convert, Container) else column_to_convert
if not isinstance(col, (int, str)):
raise TypeError("column must be an index, column letter or column name")
column_as_letter = None
if col in self.data_df.columns: # column name
column_index = self.data_df.columns.get_loc(col) + startcol + 1 # worksheet columns index start from 1
column_as_letter = cell.get_column_letter(column_index)
# column index
elif isinstance(col, int) and col >= 1:
column_as_letter = cell.get_column_letter(startcol + col)
# assuming we got column letter
elif isinstance(col, str) and col <= get_column_letter(sheet.max_column):
column_as_letter = col
if column_as_letter is None or cell.column_index_from_string(column_as_letter) > sheet.max_column:
raise IndexError("column: %s is out of columns range." % column_to_convert)
return column_as_letter
@classmethod
def read_excel(cls, path, sheet_name=0, read_style=False, use_openpyxl_styles=False,
read_comments=False, **kwargs):
"""
Creates a StyleFrame object from an existing Excel.
.. note:: :meth:`read_excel` also accepts all arguments that :func:`pandas.read_excel` accepts as kwargs.
:param str path: The path to the Excel file to read.
:param sheetname:
.. deprecated:: 1.6
Use ``sheet_name`` instead.
.. versionchanged:: 4.0
Removed
:param sheet_name: The sheet name to read. If an integer is provided then it will be used as a zero-based
sheet index. Default is 0.
:type sheet_name: str or int
:param bool read_style: If ``True`` the sheet's style will be loaded to the returned StyleFrame object.
:param bool use_openpyxl_styles: If ``True`` (and `read_style` is also ``True``) then the styles in the returned
StyleFrame object will be Openpyxl's style objects. If ``False``, the styles will be :class:`.Styler` objects.
.. note:: Using ``use_openpyxl_styles=False`` is useful if you are going to filter columns or rows by style, for example:
::
sf = sf[[col for col in sf.columns if col.style.font == utils.fonts.arial]]
:param bool read_comments: If ``True`` (and `read_style` is also ``True``) cells' comments will be loaded to the returned StyleFrame object. Note
that reading comments without reading styles is currently not supported.
:return: StyleFrame object
:rtype: :class:`StyleFrame`
"""
def _get_scheme_colors_from_excel(wb):
xlmns = 'http://schemas.openxmlformats.org/drawingml/2006/main'
if wb.loaded_theme is None:
return []
root = fromstring(wb.loaded_theme)
theme_element = root.find(QName(xlmns, 'themeElements').text)
color_schemes = theme_element.findall(QName(xlmns, 'clrScheme').text)
colors = []
for colorScheme in color_schemes:
for tag in ['lt1', 'dk1', 'lt2', 'dk2', 'accent1', 'accent2', 'accent3', 'accent4', 'accent5', 'accent6']:
accent = list(colorScheme.find(QName(xlmns, tag).text))[0]
if 'window' in accent.attrib['val']:
colors.append(accent.attrib['lastClr'])
else:
colors.append(accent.attrib['val'])
return colors
def _get_style_object(sheet, theme_colors, row, column):
cell = sheet.cell(row=row, column=column)
if use_openpyxl_styles:
return cell
else:
return Styler.from_openpyxl_style(cell, theme_colors,
read_comments and cell.comment)
def _read_style():
wb = load_workbook(path)
if isinstance(sheet_name, str):
sheet = wb[sheet_name]
elif isinstance(sheet_name, int):
sheet = wb.worksheets[sheet_name]
else:
raise TypeError("'sheet_name' must be a string or int, got {} instead".format(type(sheet_name)))
theme_colors = _get_scheme_colors_from_excel(wb)
# Set the headers row height
if header_arg is not None:
headers_row_idx = header_arg + 1
sf._rows_height[headers_row_idx] = sheet.row_dimensions[headers_row_idx].height
get_style_object = partial(_get_style_object, sheet=sheet, theme_colors=theme_colors)
for col_index, col_name in enumerate(sf.columns):
col_index_in_excel = col_index + 1
if col_index_in_excel == excel_index_col:
for row_index, sf_index in enumerate(sf.index, start=2):
sf_index.style = get_style_object(row=row_index, column=col_index_in_excel)
col_index_in_excel += 1 # Move next to excel indices column
sf.columns[col_index].style = get_style_object(row=1, column=col_index_in_excel)
for row_index, sf_index in enumerate(sf.index, start=start_row_index):
sf.at[sf_index, col_name].style = get_style_object(row=row_index, column=col_index_in_excel)
sf._rows_height[row_index] = sheet.row_dimensions[row_index].height
sf._columns_width[col_name] = sheet.column_dimensions[sf._get_column_as_letter(sheet, col_name)].width
header_arg = kwargs.get('header', 0)
if read_style and isinstance(header_arg, Iterable):
raise ValueError('Not supporting multiple index columns with read style.')
if header_arg is None:
start_row_index = 1
else:
start_row_index = header_arg + 2
index_col = kwargs.get('index_col')
excel_index_col = index_col + 1 if index_col is not None else None
if read_style and isinstance(excel_index_col, Iterable):
raise ValueError('Not supporting multiple index columns with read style.')
sf = cls(pd.read_excel(path, sheet_name, **kwargs))
if read_style:
_read_style()
sf._has_custom_headers_style = True
return sf
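# --- Hedged usage sketch (illustration only; 'template.xlsx' is a placeholder path) ---
# sf = StyleFrame.read_excel('template.xlsx', read_style=True, use_openpyxl_styles=False)
# Styles then come back as Styler objects, so rows/columns can be filtered by style, e.g.
# sf = sf[[col for col in sf.columns if col.style.font == utils.fonts.arial]]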
@classmethod
def read_excel_as_template(cls, path, df, use_df_boundaries=False, **kwargs):
"""
.. versionadded:: 3.0.1
Create a StyleFrame object from an excel template with data of the given DataFrame.
.. note:: :meth:`read_excel_as_template` also accepts all arguments that :meth:`read_excel` accepts as kwargs except for ``read_style`` which must be ``True``.
:param str path: The path to the Excel file to read.
:param df: The data to apply to the given template.
:type df: :class:`pandas.DataFrame`
:param bool use_df_boundaries: If ``True`` the template will be cut according to the boundaries of the given DataFrame.
:return: StyleFrame object
:rtype: :class:`StyleFrame`
"""
sf = cls.read_excel(path=path, read_style=True, **kwargs)
num_of_rows, num_of_cols = len(df.index), len(df.columns)
template_num_of_rows, template_num_of_cols = len(sf.index), len(sf.columns)
num_of_cols_to_copy_with_style = min(num_of_cols, template_num_of_cols)
num_of_rows_to_copy_with_style = min(num_of_rows, template_num_of_rows)
for col_index in range(num_of_cols_to_copy_with_style):
for row_index in range(num_of_rows_to_copy_with_style):
sf.iloc[row_index, col_index].value = df.iloc[row_index, col_index]
# Insert extra data in cases where the df is larger than the template.
for extra_col in df.columns[template_num_of_cols:]:
sf[extra_col] = df[extra_col][:template_num_of_rows]
for row_index in df.index[template_num_of_rows:]:
sf_index = Container(value=row_index)
sf.loc[sf_index] = list(map(Container, df.loc[row_index]))
sf.rename({sf.columns[col_index].value: df_col
for col_index, df_col in enumerate(df.columns)},
inplace=True)
if use_df_boundaries:
sf.data_df = sf.data_df.iloc[:num_of_rows, :num_of_cols]
rows_height = OrderedDict()
rows_height_range = range(num_of_rows)
for i, (k, v) in enumerate(sf._rows_height.items()):
if i in rows_height_range:
rows_height[k] = v
sf._rows_height = rows_height
columns_width = OrderedDict()
columns_width_range = range(num_of_cols)
for i, (k, v) in enumerate(sf._columns_width.items()):
if i in columns_width_range:
columns_width[k] = v
sf._columns_width = columns_width
return sf
# noinspection PyPep8Naming
@classmethod
def ExcelWriter(cls, path, **kwargs):
"""
A shortcut for :class:`pandas.ExcelWriter`, and accepts any argument it accepts except for ``engine``
"""
if 'engine' in kwargs:
raise ValueError('`engine` argument for StyleFrame.ExcelWriter can not be set')
return pd.ExcelWriter(path, engine='openpyxl', **kwargs)
@property
def row_indexes(self):
"""Excel row indexes.
StyleFrame row indexes (including the headers) according to the excel file format.
Mostly used to set rows height.
Excel indexes format starts from index 1.
:rtype: tuple
:meta private:
"""
return tuple(range(1, len(self) + 2))
def to_excel(self, excel_writer='output.xlsx', sheet_name='Sheet1',
allow_protection=False, right_to_left=False, columns_to_hide=None, row_to_add_filters=None,
columns_and_rows_to_freeze=None, best_fit=None, **kwargs):
"""Saves the dataframe to excel and applies the styles.
.. note:: :meth:`to_excel` also accepts all arguments that :meth:`pandas.DataFrame.to_excel` accepts as kwargs.
:param excel_writer: File path or existing ExcelWriter
:type excel_writer: str or :class:`pandas.ExcelWriter` or :class:`pathlib.Path`
:param str sheet_name: Name of sheet the StyleFrame will be exported to
:param bool allow_protection: Allow to protect the cells that specified as protected. If used ``protection=True``
in a Styler object this must be set to ``True``.
:param bool right_to_left: Makes the sheet right-to-left.
:param columns_to_hide: Column names to hide.
:type columns_to_hide: None or str or list or tuple or set
:param row_to_add_filters: Add filters to the given row index, starts from 0 (which will add filters to header row).
:type row_to_add_filters: None or int
:param columns_and_rows_to_freeze: Column and row string to freeze.
For example "C3" will freeze columns: A, B and rows: 1, 2.
:type columns_and_rows_to_freeze: None or str
.. versionadded:: 1.4
:param best_fit: single column, list, set or tuple of column names to attempt to best fit the width for.
.. note:: ``best_fit`` will attempt to calculate the correct column-width based on the longest value in each provided
column. However this isn't guaranteed to work for all fonts (works best with monospaced fonts). The formula
used to calculate a column's width is equivalent to
::
(len(longest_value_in_column) + A_FACTOR) * P_FACTOR
The default values for ``A_FACTOR`` and ``P_FACTOR`` are 13 and 1.3 respectively, and can be modified before
calling ``StyleFrame.to_excel`` by directly modifying ``StyleFrame.A_FACTOR`` and ``StyleFrame.P_FACTOR``
:type best_fit: None or str or list or tuple or set
:return: self
:rtype: :class:`StyleFrame`
"""
# dealing with needed pandas.to_excel defaults
header = kwargs.pop('header', True)
index = kwargs.pop('index', False)
startcol = kwargs.pop('startcol', 0)
startrow = kwargs.pop('startrow', 0)
na_rep = kwargs.pop('na_rep', '')
def get_values(x):
if isinstance(x, Container):
return x.value
else:
try:
if np.isnan(x):
return na_rep
else:
return x
except TypeError:
return x
def within_sheet_boundaries(row=1, column='A'):
return (1 <= int(row) <= sheet.max_row
and
1 <= cell.column_index_from_string(column) <= sheet.max_column)
def get_range_of_cells(row_index=None, columns=None):
if columns is None:
start_letter = self._get_column_as_letter(sheet, self.data_df.columns[0], startcol)
end_letter = self._get_column_as_letter(sheet, self.data_df.columns[-1], startcol)
else:
start_letter = self._get_column_as_letter(sheet, columns[0], startcol)
end_letter = self._get_column_as_letter(sheet, columns[-1], startcol)
if row_index is None: # returns cells range for the entire dataframe
start_index = startrow + 1
end_index = start_index + len(self)
else:
start_index = startrow + row_index + 1
end_index = start_index
return '{start_letter}{start_index}:{end_letter}{end_index}'.format(start_letter=start_letter,
start_index=start_index,
end_letter=end_letter,
end_index=end_index)
if len(self.data_df) > 0:
export_df = self.data_df.applymap(get_values)
else:
export_df = deepcopy(self.data_df)
export_df.columns = [col.value for col in export_df.columns]
# noinspection PyTypeChecker
export_df.index = [row_index.value for row_index in export_df.index]
export_df.index.name = self.data_df.index.name
if isinstance(excel_writer, (str, pathlib.Path)):
excel_writer = self.ExcelWriter(excel_writer)
export_df.to_excel(excel_writer, sheet_name=sheet_name, engine='openpyxl', header=header,
index=index, startcol=startcol, startrow=startrow, na_rep=na_rep, **kwargs)
sheet = excel_writer.sheets[sheet_name]
sheet.sheet_view.rightToLeft = right_to_left
self.data_df.fillna(Container('NaN'), inplace=True)
if index:
if self.data_df.index.name:
index_name_cell = sheet.cell(row=startrow + 1, column=startcol + 1)
index_name_cell.style = self._index_header_style.to_openpyxl_style()
for row_index, index in enumerate(self.data_df.index):
try:
date_time_types_to_formats = {pd_timestamp: index.style.date_time_format,
dt.datetime: index.style.date_time_format,
dt.date: index.style.date_format,
dt.time: index.style.time_format}
index.style.number_format = date_time_types_to_formats.get(type(index.value),
index.style.number_format)
style_to_apply = index.style.to_openpyxl_style()
except AttributeError:
style_to_apply = index.style
current_cell = sheet.cell(row=startrow + row_index + 2, column=startcol + 1)
current_cell.style = style_to_apply
if isinstance(index.style, Styler):
current_cell.comment = index.style.generate_comment()
else:
if hasattr(index.style, 'comment'):
index.style.comment.parent = None
current_cell.comment = index.style.comment
startcol += 1
if header and not self._has_custom_headers_style:
self.apply_headers_style(Styler.default_header_style())
# Iterating over the dataframe's elements and applying their styles
# openpyxl's rows and cols start from 1,1 while the dataframe is 0,0
for col_index, column in enumerate(self.data_df.columns):
try:
date_time_types_to_formats = {pd_timestamp: column.style.date_time_format,
dt.datetime: column.style.date_time_format,
dt.date: column.style.date_format,
dt.time: column.style.time_format}
column.style.number_format = date_time_types_to_formats.get(type(column.value),
column.style.number_format)
style_to_apply = column.style.to_openpyxl_style()
except AttributeError:
style_to_apply = Styler.from_openpyxl_style(column.style, [],
openpyxl_comment=column.style.comment).to_openpyxl_style()
column_header_cell = sheet.cell(row=startrow + 1, column=col_index + startcol + 1)
column_header_cell.style = style_to_apply
if isinstance(column.style, Styler):
column_header_cell.comment = column.style.generate_comment()
else:
if hasattr(column.style, 'comment') and column.style.comment is not None:
column_header_cell.comment = column.style.comment
for row_index, index in enumerate(self.data_df.index):
current_cell = sheet.cell(row=row_index + startrow + (2 if header else 1), column=col_index + startcol + 1)
data_df_style = self.data_df.at[index, column].style
try:
if '=HYPERLINK' in str(current_cell.value):
data_df_style.font_color = utils.colors.blue
data_df_style.underline = utils.underline.single
else:
if best_fit and column.value in best_fit:
data_df_style.wrap_text = False
data_df_style.shrink_to_fit = False
try:
date_time_types_to_formats = {pd_timestamp: data_df_style.date_time_format,
dt.datetime: data_df_style.date_time_format,
dt.date: data_df_style.date_format,
dt.time: data_df_style.time_format}
data_df_style.number_format = date_time_types_to_formats.get(type(self.data_df.at[index,column].value),
data_df_style.number_format)
style_to_apply = data_df_style.to_openpyxl_style()
except AttributeError:
style_to_apply = Styler.from_openpyxl_style(data_df_style, [],
openpyxl_comment=data_df_style.comment).to_openpyxl_style()
current_cell.style = style_to_apply
if isinstance(data_df_style, Styler):
current_cell.comment = data_df_style.generate_comment()
else:
if hasattr(data_df_style, 'comment') and data_df_style.comment is not None:
current_cell.comment = data_df_style.comment
except AttributeError: # if the element in the dataframe is not Container creating a default style
current_cell.style = Styler().to_openpyxl_style()
if best_fit:
if not isinstance(best_fit, (list, set, tuple)):
best_fit = [best_fit]
self.set_column_width_dict({column: (max(self.data_df[column].astype(str).str.len()) + self.A_FACTOR) * self.P_FACTOR
for column in best_fit})
for column in self._columns_width:
column_letter = self._get_column_as_letter(sheet, column, startcol)
sheet.column_dimensions[column_letter].width = self._columns_width[column]
for row in self._rows_height:
if within_sheet_boundaries(row=(row + startrow)):
sheet.row_dimensions[startrow + row].height = self._rows_height[row]
else:
raise IndexError('row: {} is out of range'.format(row))
if row_to_add_filters is not None:
try:
row_to_add_filters = int(row_to_add_filters)
if not within_sheet_boundaries(row=(row_to_add_filters + startrow + 1)):
raise IndexError('row: {} is out of rows range'.format(row_to_add_filters))
sheet.auto_filter.ref = get_range_of_cells(row_index=row_to_add_filters)
except (TypeError, ValueError):
raise TypeError("row must be an index and not {}".format(type(row_to_add_filters)))
if columns_and_rows_to_freeze is not None:
if not isinstance(columns_and_rows_to_freeze, str) or len(columns_and_rows_to_freeze) < 2:
raise TypeError("columns_and_rows_to_freeze must be a str for example: 'C3'")
if not within_sheet_boundaries(column=columns_and_rows_to_freeze[0]):
raise IndexError("column: %s is out of columns range." % columns_and_rows_to_freeze[0])
if not within_sheet_boundaries(row=columns_and_rows_to_freeze[1]):
raise IndexError("row: %s is out of rows range." % columns_and_rows_to_freeze[1])
sheet.freeze_panes = sheet[columns_and_rows_to_freeze]
if allow_protection:
sheet.protection.autoFilter = False
sheet.protection.enable()
# Iterating over the columns_to_hide and check if the format is columns name, column index as number or letter
if columns_to_hide:
if not isinstance(columns_to_hide, (list, set, tuple)):
columns_to_hide = [columns_to_hide]
for column in columns_to_hide:
column_letter = self._get_column_as_letter(sheet, column, startcol)
sheet.column_dimensions[column_letter].hidden = True
for cond_formatting in self._cond_formatting:
sheet.conditional_formatting.add(get_range_of_cells(columns=cond_formatting.columns),
cond_formatting.rule)
return excel_writer
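# --- Hedged usage sketch (illustration only; the kwargs and column name shown are assumptions) ---
# sf = StyleFrame(df, styler_obj=Styler(bg_color=utils.colors.yellow))
# writer = sf.to_excel('output.xlsx', best_fit=['some_col'], row_to_add_filters=0)
# writer.save()  # or writer.close() on newer pandas; to_excel only fills the sheet in memory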
def apply_style_by_indexes(self, indexes_to_style, styler_obj, cols_to_style=None, height=None,
complement_style=None, complement_height=None, overwrite_default_style=True):
"""
Applies a certain style to the provided indexes in the dataframe in the provided columns
:param indexes_to_style: Indexes to which the provided style will be applied.
Usually passed as pandas selecting syntax. For example,
::
sf[sf['some_col'] == 20]
:type indexes_to_style: list or tuple or int or Container
:param styler_obj: `Styler` object that contains the style that will be applied to indexes in `indexes_to_style`
:type styler_obj: :class:`.Styler`
:param cols_to_style: The column names to apply the provided style to. If ``None`` all columns will be styled.
:type cols_to_style: None or str or list[str] or tuple[str] or set[str]
:param height: If provided, set height for rows whose indexes are in `indexes_to_style`.
:type height: None or int or float
.. versionadded:: 1.5
:param complement_style: `Styler` object that contains the style which will be applied to indexes not in `indexes_to_style`
:type complement_style: None or :class:`.Styler`
:param complement_height: Height for rows whose indexes are not in `indexes_to_style`. If not provided then
`height` will be used (if provided).
:type complement_height: None or int or float
.. versionadded:: 1.6
:param bool overwrite_default_style: If ``True``, the default style (the style used when initializing StyleFrame)
will be overwritten. If ``False`` then the default style and the provided style will be combined using
:meth:`.Styler.combine` method.
:return: self
:rtype: :class:`StyleFrame`
"""
if not isinstance(styler_obj, Styler):
raise TypeError('styler_obj must be {}, got {} instead.'.format(Styler.__name__, type(styler_obj).__name__))
if isinstance(indexes_to_style, (list, tuple, int)):
indexes_to_style = self.index[indexes_to_style]
elif isinstance(indexes_to_style, Container):
indexes_to_style = | pd.Index([indexes_to_style]) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Sep/2020
json built from the sqlite table
@author: github rictom/rede-cnpj
2020-11-25 - If a table already exists, it seems to slow down pandas pd.to_sql.
Do not run Create table or create an index on a table that will be created or modified by pandas
"""
import os, sys, glob
import time, copy, re, string, unicodedata, collections
import pandas as pd, sqlalchemy
from fnmatch import fnmatch
'''
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
'''
import config
try:
camDbSqlite = config.config['BASE']['base_receita']
except:
    sys.exit('the sqlite file was not found. Check that the database path in the rede.ini configuration file is correct.')
camDBSqliteFTS = config.config['BASE'].get('base_receita_fulltext','')
caminhoDBLinks = config.config['BASE'].get('base_links', '')
caminhoDBEnderecoNormalizado = config.config['BASE'].get('base_endereco_normalizado', '')
#logAtivo = True if config['rede']['logAtivo']=='1' else False #logs queried CNPJs
logAtivo = config.config['ETC'].getboolean('logativo',False) #logs queried CNPJs
# ligacaoSocioFilial = True if config['rede']['ligacaoSocioFilial']=='1' else False #creates partner-to-branch links
ligacaoSocioFilial = config.config['ETC'].getboolean('ligacao_socio_filial',False) #creates partner-to-branch links
class DicionariosCodigos():
def __init__(self):
dfaux = pd.read_csv(r"tabelas/tabela-de-qualificacao-do-socio-representante.csv", sep=';')
self.dicQualificacao_socio = pd.Series(dfaux.descricao.values,index=dfaux.codigo).to_dict()
dfaux = pd.read_csv(r"tabelas/DominiosMotivoSituaoCadastral.csv", sep=';', encoding='latin1', dtype=str)
self.dicMotivoSituacao = pd.Series(dfaux['Descrição'].values, index=dfaux['Código']).to_dict()
dfaux = pd.read_excel(r"tabelas/cnae.xlsx", sheet_name='codigo-grupo-classe-descr')
self.dicCnae = | pd.Series(dfaux['descricao'].values, index=dfaux['codigo']) | pandas.Series |
"""A collections of functions to facilitate
analysis of HiC data based on the cooler and cooltools
interfaces."""
import warnings
from typing import Tuple, Dict, Callable
import cooltools.expected
import cooltools.snipping
import pandas as pd
import bioframe
import cooler
import pairtools
import numpy as np
import multiprocess
from .snipping_lib import flexible_pileup
# define type aliases
CisTransPairs = Dict[str, pd.DataFrame]
PairsSamples = Dict[str, CisTransPairs]
# define functions
def get_expected(
clr: cooler.Cooler, arms: pd.DataFrame, proc: int = 20, ignore_diagonals: int = 2
) -> pd.DataFrame:
"""Takes a clr file handle and a pandas dataframe
with chromosomal arms (generated by get_arms_hg19()) and calculates
the expected read number at a certain genomic distance.
The proc parameter defines how many processes should be used
to do the calculations. ignore_diagonals specifies how many diagonals
to ignore (0 means the main diagonal, 1 means the main diagonal
and the flanking two diagonals, and so on)"""
with multiprocess.Pool(proc) as pool:
expected = cooltools.expected.diagsum(
clr,
tuple(arms.itertuples(index=False, name=None)),
transforms={"balanced": lambda p: p["count"] * p["weight1"] * p["weight2"]},
map=pool.map,
ignore_diags=ignore_diagonals,
)
# construct a single dataframe for all regions (arms)
expected_df = (
expected.groupby(["region", "diag"])
.aggregate({"n_valid": "sum", "count.sum": "sum", "balanced.sum": "sum"})
.reset_index()
)
# account for different number of valid bins in diagonals
expected_df["balanced.avg"] = expected_df["balanced.sum"] / expected_df["n_valid"]
return expected_df
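# --- Hedged usage sketch (illustration only; the cooler URI below is a placeholder) ---
# clr = cooler.Cooler("example.mcool::/resolutions/10000")
# arms = get_arms_hg19()
# expected_df = get_expected(clr, arms, proc=4, ignore_diagonals=2)
# expected_df.head()  # columns: region, diag, n_valid, count.sum, balanced.sum, balanced.avg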
def get_arms_hg19() -> pd.DataFrame:
"""Downloads the coordinates for chromosomal arms of the
genome assembly hg19 and returns it as a dataframe."""
# download chromosomal sizes
chromsizes = bioframe.fetch_chromsizes("hg19")
# download centromers
centromeres = bioframe.fetch_centromeres("hg19")
centromeres.set_index("chrom", inplace=True)
centromeres = centromeres.mid
# define chromosomes that are well defined (filter out unassigned contigs)
good_chroms = list(chromsizes.index[:23])
    # construct arm regions (for each chromosome from 0 to the centromere and from the centromere to the end)
arms = [
arm
for chrom in good_chroms
for arm in (
(chrom, 0, centromeres.get(chrom, 0)),
(chrom, centromeres.get(chrom, 0), chromsizes.get(chrom, 0)),
)
]
# construct dataframe out of arms
arms = pd.DataFrame(arms, columns=["chrom", "start", "end"])
return arms
def _assign_supports(features, supports):
"""assigns supports to entries in snipping windows.
Workaround for bug in cooltools 0.2.0 that duplicate
supports are not handled correctly. Copied from cooltools.common.assign_regions"""
index_name = features.index.name # Store the name of index
features = (
features.copy().reset_index()
) # Store the original features' order as a column with original index
if "chrom" in features.columns:
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=["chrom", "start", "end"],
cols2=["chrom", "start", "end"],
keep_order=True,
return_overlap=True,
)
overlap_columns = [
"index_1",
"chrom_1",
"start_1",
"end_1",
] # To filter out duplicates later
overlap["overlap_length"] = overlap["overlap_end"] - overlap["overlap_start"]
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values("overlap_length", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
).reset_index(drop=True)
# Copy single column with overlapping region name:
features["region"] = overlap["name_2"]
if "chrom1" in features.columns:
for idx in ("1", "2"):
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=[f"chrom{idx}", f"start{idx}", f"end{idx}"],
cols2=[f"chrom", f"start", f"end"],
keep_order=True,
return_overlap=True,
)
overlap_columns = [
"index_1",
f"chrom{idx}_1",
f"start{idx}_1",
f"end{idx}_1",
] # To filter out duplicates later
overlap[f"overlap_length{idx}"] = (
overlap[f"overlap_end{idx}"] - overlap[f"overlap_start{idx}"]
)
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values(f"overlap_length{idx}", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
).reset_index(drop=True)
# Copy single column with overlapping region name:
features[f"region{idx}"] = overlap["name_2"]
# Form a single column with region names where region1 == region2, and np.nan in other cases:
features["region"] = np.where(
features["region1"] == features["region2"], features["region1"], np.nan
)
features = features.drop(
["region1", "region2"], axis=1
) # Remove unnecessary columns
features = features.set_index(
index_name if not index_name is None else "index"
) # Restore the original index
features.index.name = index_name # Restore original index title
return features
def assign_regions(
window: int,
binsize: int,
chroms: pd.Series,
positions: pd.Series,
arms: pd.DataFrame,
) -> pd.DataFrame:
"""Constructs a 2d region around a series of chromosomal location.
Window specifies the windowsize for the constructed regions. The total region
assigned will be pos-window until pos+window. The binsize specifies the size
of the HiC bins. The positions which represent the center of the regions
is givin the the chroms series and the positions series."""
# construct windows from the passed chromosomes and positions
snipping_windows = cooltools.snipping.make_bin_aligned_windows(
binsize, chroms.values, positions.values, window
)
# assign chromosomal arm to each position
snipping_windows = _assign_supports(snipping_windows, bioframe.parse_regions(arms))
return snipping_windows
def assign_regions_2d(
window: int,
binsize: int,
chroms1: pd.Series,
positions1: pd.Series,
chroms2: pd.Series,
positions2: pd.Series,
arms: pd.DataFrame,
) -> pd.DataFrame:
"""Constructs a 2d region around a series of chromosomal location pairs.
Window specifies the windowsize for the constructed regions. The total region
assigned will be pos-window until pos+window. The binsize specifies the size
of the HiC bins. The positions which represent the center of the regions
is given by the chroms1 and chroms2 series as well as the
positions1 and positions2 series."""
# construct windows from the passed chromosomes 1 and positions 1
windows1 = assign_regions(window, binsize, chroms1, positions1, arms)
windows1.columns = [str(i) + "1" for i in windows1.columns]
# construct windows from the passed chromosomes 1 and positions 1
windows2 = assign_regions(window, binsize, chroms2, positions2, arms)
windows2.columns = [str(i) + "2" for i in windows2.columns]
    # concatenate windows
    windows = pd.concat((windows1, windows2), axis=1)
# filter for mapping to different regions
windows_final = windows.loc[windows["region1"] == windows["region2"], :]
# subset data and rename regions
windows_small = windows_final[
["chrom1", "start1", "end1", "chrom2", "start2", "end2", "region1"]
]
windows_small.columns = [
"chrom1",
"start1",
"end1",
"chrom2",
"start2",
"end2",
"region",
]
return windows_small
def do_pileup_obs_exp(
clr: cooler.Cooler,
expected_df: pd.DataFrame,
snipping_windows: pd.DataFrame,
proc: int = 5,
collapse: bool = True,
) -> np.ndarray:
"""Takes a cooler file handle, an expected dataframe
constructed by getExpected, snipping windows constructed
by assignRegions and performs a pileup on all these regions
based on the obs/exp value. Returns a numpy array
that contains averages of all selected regions.
The collapse parameter specifies whether to return
the average window over all piles (collapse=True), or the individual
windows (collapse=False)."""
region_frame = get_regions_from_snipping_windows(expected_df)
oe_snipper = cooltools.snipping.ObsExpSnipper(
clr, expected_df, regions=bioframe.parse_regions(region_frame)
)
# set warnings filter to ignore RuntimeWarnings since cooltools
# does not check whether there are inf or 0 values in
# the expected dataframe
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
with multiprocess.Pool(proc) as pool:
# extract a matrix of obs/exp average values for each snipping_window
oe_pile = cooltools.snipping.pileup(
snipping_windows, oe_snipper.select, oe_snipper.snip, map=pool.map
)
if collapse:
# calculate the average of all windows
collapsed_pile = np.nanmean(oe_pile[:, :, :], axis=2)
return collapsed_pile
return oe_pile
def do_pileup_iccf(
clr: cooler.Cooler,
snipping_windows: pd.DataFrame,
proc: int = 5,
collapse: bool = True,
) -> np.ndarray:
"""Takes a cooler file handle and snipping windows constructed
by assignRegions and performs a pileup on all these regions
based on the corrected HiC counts. Returns a numpy array
that contains averages of all selected regions. The collapse
parameter specifies whether to return
the average window over all piles (collapse=True), or the individual
windows (collapse=False)."""
# get regions from snipping windows
region_frame = get_regions_from_snipping_windows(snipping_windows)
iccf_snipper = cooltools.snipping.CoolerSnipper(
clr, regions=bioframe.parse_regions(region_frame)
)
with multiprocess.Pool(proc) as pool:
iccf_pile = cooltools.snipping.pileup(
snipping_windows, iccf_snipper.select, iccf_snipper.snip, map=pool.map
)
if collapse:
# calculate the average of all windows
collapsed_pile_plus = np.nanmean(iccf_pile[:, :, :], axis=2)
return collapsed_pile_plus
return iccf_pile
def sliding_diamond(
array: np.ndarray, side_len: int = 6, center_x: bool = True
) -> Tuple[np.ndarray, np.ndarray]:
"""Will slide a diamond of side length 'sideLen'
down the diagonal of the passed array and return
the average values for each position and
the relative position of each value with respect
to the center of the array (in Bin units)"""
# initialize accumulators for diamond value and x-position
diamond_accumulator = list()
bin_accumulator = list()
if side_len % 2 == 0:
half_window = side_len
for i in range(0, (array.shape[0] - half_window + 1)):
# extract diamond
diamond_array = array[i : (i + half_window), i : (i + half_window)]
# set inf to nan for calculation of mean
diamond_array[np.isinf(diamond_array)] = np.nan
diamond_accumulator.append(np.nanmean(diamond_array))
# append x-value for this particular bin
bin_accumulator.append(
np.median(
range(
i,
(i + half_window),
)
)
)
else:
half_window = side_len // 2
for i in range(half_window, (array.shape[0] - half_window)):
# extract diamond
diamond_array = array[
i - half_window : (i + half_window) + 1,
i - half_window : (i + half_window) + 1,
]
# set inf to nan for calculation of mean
diamond_array[np.isinf(diamond_array)] = np.nan
diamond_accumulator.append(np.nanmean(diamond_array))
# append x-value for this particular bin
bin_accumulator.append(
np.median(
range(
i - half_window,
(i + half_window) + 1,
)
)
)
if center_x:
x_out = np.array(bin_accumulator - np.median(bin_accumulator))
else:
x_out = np.array(bin_accumulator)
return (x_out, np.array(diamond_accumulator))
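# --- Hedged usage sketch (illustration only) ---
# pile = do_pileup_obs_exp(clr, expected_df, snipping_windows)  # collapsed 2D pileup
# x, y = sliding_diamond(pile, side_len=6, center_x=True)
# x is the bin offset from the matrix center; y is the mean diamond signal at that offset.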
def load_pairs(path: str) -> pd.DataFrame:
"""Function to load a .pairs or .pairsam file
into a pandas dataframe.
This only works for relatively small files!"""
# get handels for header and pairs_body
header, pairs_body = pairtools._headerops.get_header(
pairtools._fileio.auto_open(path, "r")
)
# extract column names from header
cols = pairtools._headerops.extract_column_names(header)
# read data into dataframe
frame = | pd.read_csv(pairs_body, sep="\t", names=cols) | pandas.read_csv |
import pandas as pd
import numpy as np
import scipy.stats
import matplotlib as plt
from scipy.stats import norm
from scipy.optimize import minimize
import ipywidgets as widgets
from IPython.display import display
import math
def drawdown(ret_ser: pd.Series):
"""
    Let's calculate it:
    1. Compute wealth index
    2. Compute previous peaks
    3. Compute drawdown - the decline from the previous peak, expressed as a fraction of that peak
"""
wealth_index = 1000*(1+ret_ser).cumprod()
prev_peak = wealth_index.cummax()
draw_down = (wealth_index-prev_peak)/prev_peak
return pd.DataFrame({
"Wealth": wealth_index,
"Previous Peak": prev_peak,
"Drawdown" : draw_down
})
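# --- Hedged worked example (illustration only) ---
# For returns of +10% and -50%, the wealth index is 1000 -> 1100 -> 550 and the
# running peak is 1100, so the final drawdown is (550 - 1100) / 1100 = -0.5:
# drawdown(pd.Series([0.10, -0.50]))["Drawdown"].iloc[-1]  # -> -0.5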
def all_pfme():
"""
    This function reads all data in the Portfolios_Formed_on_ME_monthly_EW file.
"""
pfme_df = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv", index_col=0 , na_values=-99.99, parse_dates= True)
pfme_df.index = | pd.to_datetime(pfme_df.index, format="%Y%m") | pandas.to_datetime |
from sklearn.manifold import TSNE
from clustering import silhouette as sil
from data_processing import MulticlusteringExperimentUtils as expUtils
# Keep the clustering experiments that involve outliers here
from clustering.KMeansVariations import kMeans_baseline, kMeans_baseline_high_iteration, kMeans_baseline_random_init, \
kMeans_baseline_4_clusters, kMeans_baseline_3_clusters, kMeans_baseline_2_clusters, kMeans_baseline_2_clusters_low_iter,\
kMeans_baseline_2_clusters_high_iter, kMeans_baseline_highest_iteration, kMeans_baseline_highest_iteration_2_clusters,\
kMeans_baseline_5_clusters, kMeans_baseline_3_clusters_random_high_iter, kMeans_baseline_3_clusters_random_med_iter
from clustering.tsne import makeTSNEPlot
import pandas as pd
# --- Remove all of the outliers for the big features ----
# average hold time
from data_processing.CleanDataUtils import feature_set, feature_set_complete_vectors_only,feature_set_more_even_vectors, feature_set_3_labels_completeSamplesOnly,feature_set_3_labels_AllSamples, feature_set_4049_reduced
from data_processing.dataUtils import getColumnZScores, removeOutliersByZScore
def removeOutliersAndNormalizeData(feature_set_input, threshold):
feature1 = 'avgSeekTime'
feature2 = 'avgHoldTime'
feature3 = 'averageNgramTime'
feature_set_outliers_removed = feature_set_input
feature_set_outliers_removed = getColumnZScores(pd.DataFrame(feature_set_outliers_removed), feature1)
feature_set_outliers_removed = getColumnZScores(pd.DataFrame(feature_set_outliers_removed), feature2)
feature_set_outliers_removed = getColumnZScores(pd.DataFrame(feature_set_outliers_removed), feature3)
feature_set_outliers_removed = removeOutliersByZScore(feature_set_outliers_removed, feature1, threshold)
feature_set_outliers_removed = removeOutliersByZScore(feature_set_outliers_removed, feature2, threshold)
feature_set_outliers_removed = removeOutliersByZScore(feature_set_outliers_removed, feature3, threshold)
feature_set_outliers_removed = expUtils.normalizeLabeledData(pd.DataFrame(feature_set_outliers_removed))
feature_set_outliers_removed = feature_set_outliers_removed.astype(float)
feature_set_outliers_removed_labels = pd.DataFrame(feature_set_outliers_removed).get(['label'])
feature_set_outliers_removed.drop(columns=['label', 'userID'], inplace=True)
return feature_set_outliers_removed, feature_set_outliers_removed_labels
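# --- Hedged note and usage sketch (not part of the original experiments) ---
# The imported helpers presumably keep only rows whose z-scores for the three timing
# features stay within the threshold (3 in the calls below), after which the labeled
# data is normalized and the 'label'/'userID' columns are split off. Hypothetical call:
# features, labels = removeOutliersAndNormalizeData(feature_set_complete_vectors_only, 3)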
def runExperiment(expName, kmeans, labels, feature_set):
kmeans_res = kmeans.fit_predict(feature_set)
cluster1, cluster2, cluster3, cluster4, cluster5, cluster6 = \
expUtils.getClusterBucketsForMultiClustering(labels, kmeans_res)
expUtils.getF1Score(cluster1, cluster2, cluster3, cluster4, cluster5, cluster6, expName)
feature_set_labeled = pd.DataFrame(feature_set).join(labels)
feature_set_labeled['cluster'] = kmeans_res.copy()
sil.makeSilhouettePlot(feature_set, kmeans_res, expName)
makeTSNEPlot(feature_set_labeled, experimentName=expName)
expUtils.getAverageForAll(cluster1, cluster2, cluster3, cluster4, cluster5, cluster6,
expName, labels)
feature1 = 'avgSeekTime'
feature2 = 'avgHoldTime'
feature3 = 'averageNgramTime'
set_combined_labels_completeOnly = feature_set_3_labels_completeSamplesOnly
normalized_completeOnly_combined, normalized_completeOnly_combined_labels = \
removeOutliersAndNormalizeData(set_combined_labels_completeOnly, 3)
runExperiment("jonstest10_Kmeans_baseline_completeVectors_3Clusters", kMeans_baseline_3_clusters, normalized_completeOnly_combined_labels, normalized_completeOnly_combined)
set_combined_labels_allSamples = feature_set_3_labels_AllSamples
normalized_set_combined_labels_allSamples, set_combined_labels_allSamples_labels = \
removeOutliersAndNormalizeData(set_combined_labels_allSamples, 3)
runExperiment("jonstest10_Kmeans_baseline_AllSamples_3Clusters", kMeans_baseline_3_clusters, set_combined_labels_allSamples_labels, normalized_set_combined_labels_allSamples)
runExperiment("jonstest10_Kmeans_baseline_completeVectors_3Clusters_randomInitMedIter", kMeans_baseline_3_clusters_random_med_iter, normalized_completeOnly_combined_labels, normalized_completeOnly_combined)
runExperiment("jonstest10_Kmeans_baseline_completeVectors_3Clusters_randomInitHighIter", kMeans_baseline_3_clusters_random_high_iter, normalized_completeOnly_combined_labels, normalized_completeOnly_combined)
runExperiment("jonstest9_Kmeans_baseline_AllSamples_3Clusters_randomInitMedIter", kMeans_baseline_3_clusters_random_med_iter, set_combined_labels_allSamples_labels, normalized_set_combined_labels_allSamples)
runExperiment("jonstest9_Kmeans_baseline_AllSamples_3Clusters_randomInitHighIter", kMeans_baseline_3_clusters_random_high_iter, set_combined_labels_allSamples_labels, normalized_set_combined_labels_allSamples)
middleage_reduced = feature_set_4049_reduced
normalized_middleage_reduced, middleage_reduced_labels = \
removeOutliersAndNormalizeData(middleage_reduced, 3)
runExperiment("middleage_reduced_baseline", kMeans_baseline, middleage_reduced_labels, normalized_middleage_reduced)
runExperiment("middleage_reduced_baseline", kMeans_baseline, middleage_reduced_labels, normalized_middleage_reduced)
set = feature_set_complete_vectors_only
outliersRemovedNotNormalized = getColumnZScores(pd.DataFrame(set), feature1)
outliersRemovedNotNormalized = getColumnZScores(pd.DataFrame(outliersRemovedNotNormalized), feature2)
outliersRemovedNotNormalized = getColumnZScores(pd.DataFrame(outliersRemovedNotNormalized), feature3)
outliersRemovedNotNormalized = removeOutliersByZScore(outliersRemovedNotNormalized, feature1, 3)
outliersRemovedNotNormalized = removeOutliersByZScore(outliersRemovedNotNormalized, feature2, 3)
outliersRemovedNotNormalized = removeOutliersByZScore(outliersRemovedNotNormalized, feature3, 3)
set_normalized = expUtils.normalizeLabeledData(pd.DataFrame(set))
set_normalized = set_normalized.astype(float)
print(set_normalized)
set_normalized_labels = pd.DataFrame(set_normalized).get(['label'])
print(set_normalized_labels)
set_normalized.drop(columns=['label', 'userID'], inplace=True)
runExperiment("jonstest8_Kmeans_baseline_completeVectors", kMeans_baseline, set_normalized_labels, set_normalized)
#with outliers removed
set_complete_vectors = set
feature1 = 'avgSeekTime'
feature2 = 'avgHoldTime'
feature3 = 'averageNgramTime'
#
set_complete_vectors = getColumnZScores(pd.DataFrame(set_complete_vectors), feature1)
set_complete_vectors = getColumnZScores(pd.DataFrame(set_complete_vectors), feature2)
set_complete_vectors = getColumnZScores(pd.DataFrame(set_complete_vectors), feature3)
set_complete_vectors = removeOutliersByZScore(set_complete_vectors, feature1, 3)
set_complete_vectors = removeOutliersByZScore(set_complete_vectors, feature2, 3)
set_complete_vectors = removeOutliersByZScore(set_complete_vectors, feature3, 3)
set_complete_vectors = expUtils.normalizeLabeledData(pd.DataFrame(set_complete_vectors))
set_complete_vectors = set_complete_vectors.astype(float)
set_complete_vectors_COPY = set_complete_vectors
set_complete_vectors_labels = pd.DataFrame(set_complete_vectors).get(['label'])
set_complete_vectors.drop(columns=['label', 'userID'], inplace=True)
runExperiment("jonstest8_Kmeans_baseline_completeVectors_outliersRemoved_3clusters", kMeans_baseline_3_clusters, set_complete_vectors_labels, set_complete_vectors)
runExperiment("jonstest8_Kmeans_baseline_completeVectors_outliersRemoved_highest_iters", kMeans_baseline_highest_iteration, set_complete_vectors_labels, set_complete_vectors)
#EXPERIMENT RUNS WITH ONLY COMPLETE VECTORS
even_vectors = feature_set_more_even_vectors
even_vectors = getColumnZScores(pd.DataFrame(even_vectors), feature1)
even_vectors = getColumnZScores(pd.DataFrame(even_vectors), feature2)
even_vectors = getColumnZScores( | pd.DataFrame(even_vectors) | pandas.DataFrame |
"""
BFR
"""
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
class BFR(object):
class Local(object):
def __init__(self, n_cluster, soft_n_cluster=None, shrink=0.5,
input_file_path=None, iter_func=None,
chunk_size=None, kmeans_params=None, print_log=True,
write_to_file=False, output_file=None, cache_labels=None):
"""
:param n_cluster: int
:param soft_n_cluster: int
Used to roughly cluster points.
:param shrink: float=0~1.0
Used to reduce the threshold of Clustering Algorithm.
:param input_file_path: str
The file to read the input results.
If parameter "data" is not specified, this parameter is used to build a generator.
:param iter_func: function
The function used to build iterator. The iterator returns pandas.DataFrame with index.
:param output_file: str
The file to store the output results.
"""
self._n_cluster = n_cluster
self._soft_n_cluster = soft_n_cluster if soft_n_cluster is not None else n_cluster ** 2
self._shrink = shrink
self._print_log = print_log
self._data_generator = None
self.clusters = self.labels = None
self._write_to_file, self._output_file = write_to_file, output_file
if cache_labels is None:
self._cache_labels = not write_to_file
else:
self._cache_labels = cache_labels
if isinstance(kmeans_params, dict):
self._kmeans_params = kmeans_params
else:
self._kmeans_params = {}
if input_file_path is None and iter_func is None:
print("No data input. Please call add_data(generator) to add data to the model.")
else:
self.add_data(iter_func=iter_func, input_file_path=input_file_path, chunk_size=chunk_size)
def add_data(self, iter_func, input_file_path, chunk_size):
"""
:param input_file_path: str
:param iter_func: function
:param chunk_size: int
"""
if callable(iter_func):
self._data_generator = iter_func
elif isinstance(input_file_path, str):
self._data_generator = lambda: pd.read_table(input_file_path,
delimiter="[^0-9a-zA-Z\.\n]", dtype=np.float64,
chunksize=chunk_size)
else:
raise ValueError
def run(self):
"""
DS: (n_clusters, [n, SUM, SUM_SQUARE])
CS: (n_clusters, [n, SUM, SUM_SQUARE])
RS: (n_samples, dimension)
"""
iterator_vectors = self._data_generator()
vectors = next(iterator_vectors)
n_dim = vectors.shape[1]
"""
Initialize DS, CS, RS.
"""
DS, CS, RS = self._initialization(vectors, n_dim)
if self._print_log:
print("Tasks start. Start to print intermediate results ...")
self.print_log(1, DS, CS, RS)
"""
Iteratively process chunks
"""
for i, vectors in enumerate(iterator_vectors):
DS, CS, RS = self._iteration(vectors, n_dim, DS, CS, RS)
if self._print_log:
self.print_log(i+2, DS, CS, RS)
DS, CS = self._last_round(DS, CS, n_dim)
self.clusters = DS[:, 1:n_dim + 1]
if self._print_log:
self.print_log("final", DS, CS, RS)
"""
Save the results: cluster coordinates and point labels.
"""
if self._cache_labels and self._write_to_file:
self.labels = pd.concat(list(self.classify(DS, self._n_cluster, n_dim)))
self.labels.to_csv(self._output_file, mode="w", sep=",")
elif self._cache_labels:
self.labels = pd.concat(list(self.classify(DS, self._n_cluster, n_dim)))
elif self._write_to_file:
| pd.DataFrame(columns=["cluster"]) | pandas.DataFrame |
# import csv
# with open('C:/Users/Eddie/Desktop/python-playground/Week 4/day 25 - CSV Data + Pandas Library/weather_data.csv') as data:
# weather_data = csv.reader(data)
# temperature = []
# for row in weather_data:
# if row[1] == 'temp':
# continue
# temperature.append(int(row[1]))
# print(temperature)
import pandas
# access and read CSV data using pandas library
data = pandas.read_csv('C:/Users/Eddie/Desktop/python-playground/Week 4/day 25 - CSV Data + Pandas Library/weather_data.csv')
# convert CSV dataframe into python object
# data_dict = data.to_dict()
# print(data_dict)
# convert CSV series into python object list
# temp = data['temp'].to_list()
# average = sum(temp) / len(temp)
# print(average)
# using methods on series and dataframe
# average_temp = data['temp'].mean()
# print(average_temp)
# max_temp = data['temp'].max()
# print(max_temp)
# selecting row by a value
# monday = data[data.day == 'Monday']
# print(int(monday.temp)*(9/5) + 32)
# hottest_day = data[data.temp == data.temp.max()]
# print(hottest_day)
# creating a dataframe
student_dict = {
'student': ['Amy', 'James', 'Angela'],
'score': [76, 56, 65]
}
student = | pandas.DataFrame(student_dict) | pandas.DataFrame |
from http.server import BaseHTTPRequestHandler, HTTPServer
import socketserver
import pickle
import urllib.request
import json
from pprint import pprint
from pandas.io.json import json_normalize
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Ridge
from math import sqrt
import os
import errno
from pymongo import MongoClient
import urllib.parse as urlparse
from influxdb import InfluxDBClient
from pymongo import MongoClient
import pandas as pd
from pandas.io.json import json_normalize
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import TheilSenRegressor
from sklearn.datasets import make_regression
class Terminus(BaseHTTPRequestHandler):
def getAllNodeNames(self,client):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY=nodename;")
nodeNames_temp = list(queryResult.get_points())
dfnodeNames = pd.DataFrame(nodeNames_temp)
allNodeNames = dfnodeNames[:]["value"]
return allNodeNames
def getNamespaceNames(self,client,node):
nsQuery = client.query("SHOW TAG VALUES FROM uptime WITH KEY=namespace_name WHERE nodename = '"+node+"';")
nsQuery_temp = list(nsQuery.get_points())
dfnsNames = pd.DataFrame(nsQuery_temp)
allnsNames = dfnsNames[:]["value"]
return allnsNames
def getAllPodNames(self,client,node,ns_name):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY = pod_name WHERE namespace_name = '"+ns_name+"' AND nodename = '"+node+"';")
podNames_temp = list(queryResult.get_points())
dfpodNames = | pd.DataFrame(podNames_temp) | pandas.DataFrame |
import numpy as np
import pytest
from pandas import (
DataFrame,
IndexSlice,
NaT,
Timestamp,
)
import pandas._testing as tm
pytest.importorskip("jinja2")
from pandas.io.formats.style import Styler
from pandas.io.formats.style_render import _str_escape
@pytest.fixture
def df():
return DataFrame(
data=[[0, -0.609], [1, -1.228]],
columns=["A", "B"],
index=["x", "y"],
)
@pytest.fixture
def styler(df):
return Styler(df, uuid_len=0)
def test_display_format(styler):
ctx = styler.format("{:0.1f}")._translate(True, True)
assert all(["display_value" in c for c in row] for row in ctx["body"])
assert all([len(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"])
assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3
def test_format_dict(styler):
ctx = styler.format({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "0.0"
assert ctx["body"][0][2]["display_value"] == "-60.90%"
def test_format_string(styler):
ctx = styler.format("{:.2f}")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "0.00"
assert ctx["body"][0][2]["display_value"] == "-0.61"
assert ctx["body"][1][1]["display_value"] == "1.00"
assert ctx["body"][1][2]["display_value"] == "-1.23"
def test_format_callable(styler):
ctx = styler.format(lambda v: "neg" if v < 0 else "pos")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "pos"
assert ctx["body"][0][2]["display_value"] == "neg"
assert ctx["body"][1][1]["display_value"] == "pos"
assert ctx["body"][1][2]["display_value"] == "neg"
def test_format_with_na_rep():
# GH 21527 28358
df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
ctx = df.style.format(None, na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
ctx = df.style.format("{:.2%}", na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][1]["display_value"] == "110.00%"
assert ctx["body"][1][2]["display_value"] == "120.00%"
ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate(True, True)
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][2]["display_value"] == "120.00%"
def test_format_non_numeric_na():
# GH 21527 28358
df = DataFrame(
{
"object": [None, np.nan, "foo"],
"datetime": [None, NaT, Timestamp("20120101")],
}
)
with tm.assert_produces_warning(FutureWarning):
ctx = df.style.set_na_rep("NA")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "NA"
assert ctx["body"][0][2]["display_value"] == "NA"
assert ctx["body"][1][1]["display_value"] == "NA"
assert ctx["body"][1][2]["display_value"] == "NA"
ctx = df.style.format(None, na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][1]["display_value"] == "-"
assert ctx["body"][1][2]["display_value"] == "-"
def test_format_clear(styler):
assert (0, 0) not in styler._display_funcs # using default
styler.format("{:.2f")
assert (0, 0) in styler._display_funcs # formatter is specified
styler.format()
assert (0, 0) not in styler._display_funcs # formatter cleared to default
@pytest.mark.parametrize(
"escape, exp",
[
("html", "<>&"%$#_{}~^\\~ ^ \\ "),
(
"latex",
'<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum '
"\\textbackslash \\textasciitilde \\space \\textasciicircum \\space "
"\\textbackslash \\space ",
),
],
)
def test_format_escape_html(escape, exp):
chars = '<>&"%$#_{}~^\\~ ^ \\ '
df = DataFrame([[chars]])
s = | Styler(df, uuid_len=0) | pandas.io.formats.style.Styler |
from functools import partial
from unittest import TestCase, main as unittest_main
import numpy as np
import pandas as pd
from scipy.special import digamma
from scipy.stats import beta, norm
from gbstats.bayesian.dists import Beta, Norm
DECIMALS = 5
round_ = partial(np.round, decimals=DECIMALS)
def roundsum(x, decimals=DECIMALS):
return np.round(np.sum(x), decimals=decimals)
class TestBeta(TestCase):
def test_posterior(self):
prior = 1, 1
data = 1, 2
result = Beta.posterior(prior, data)
outcome = (2, 2)
for res, out in zip(result, outcome):
self.assertEqual(res, out)
prior = 1, 1
data = pd.Series([1, 10]), pd.Series([2, 20])
result = Beta.posterior(prior, data)
outcome = pd.Series([2, 11]), pd.Series([2, 11])
for res, out in zip(result, outcome):
| pd.testing.assert_series_equal(res, out) | pandas.testing.assert_series_equal |
import os
import pandas as pd
import json
import cv2
def CSV_300W_LP(data_dir):
folders = [folder for folder in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, folder))]
images = []
for idx, folder in enumerate(folders):
folder_path = os.path.join(data_dir, folder)
folder_images = [image[:-4] for image in os.listdir(folder_path) if '.jpg' in image]
for image in folder_images:
image_path = os.path.join(folder, image)
images.append(image_path)
df = pd.DataFrame(images)
df.to_csv(os.path.join(data_dir, '300W_LP.txt'), header=False, index=False)
def CSV_custom(data_dir, data_type, output_dir, padding_perc = 0.0):
images_dir = os.path.join(data_dir,'images', data_type)
annotations = os.path.join(data_dir, 'annotations', '%s_400_rigi_essemble.json' %data_type)
with open(annotations, 'r') as f:
annon_dict = json.loads(f.read())
# Initializes variables
avail_imgs = annon_dict.keys()
x1_list = []
x2_list = []
y1_list = []
y2_list = []
roll_list = []
pitch_list = []
yaw_list = []
image_paths = []
# Gets path for all images
images = [os.path.join(images_dir, image) for image in os.listdir(images_dir) if 'jpg' or 'png' in image]
for image in images:
# read image (to determine size later)
img = cv2.imread(image)
# gets images Id
img_id = os.path.basename(image)[:-4].lstrip('0')
# ensures the image is in the dictionary key
if not img_id in avail_imgs:
continue
for idx, annon in enumerate(annon_dict[img_id].keys()):
# ensures we have a face detected
if not annon_dict[img_id][annon]['head_pose_pred']:
continue
bbox = annon_dict[img_id][annon]['head_pose_pred']['box']
x1 = bbox['x1']
x2 = bbox['x2']
y1 = bbox['y1']
y2 = bbox['y2']
# add padding to face
upper_y = int(max(0, y1 - (y2 - y1) * padding_perc))
lower_y = int(min(img.shape[0], y2 + (y2 - y1) * padding_perc))
left_x = int(max(0, x1 - (x2 - x1) * padding_perc))
right_x = int(min(img.shape[1], x2 + (x2 - x1) * padding_perc))
# get head pose labels
roll = annon_dict[img_id][annon]['head_pose_pred']['roll']
pitch = annon_dict[img_id][annon]['head_pose_pred']['pitch']
yaw = annon_dict[img_id][annon]['head_pose_pred']['yaw']
image_paths.append(os.path.basename(image)[:-4])
x1_list.append(left_x)
x2_list.append(right_x)
y1_list.append(upper_y)
y2_list.append(lower_y)
roll_list.append(roll)
pitch_list.append(pitch)
yaw_list.append(yaw)
# saves data in RetinaNet format
data = {'image_path': image_paths,
'x1': x1_list, 'x2': x2_list,
'y1': y1_list, 'y2': y2_list,
'roll': roll_list, 'pitch': pitch_list,
'yaw': yaw_list}
# Create DataFrame
df = pd.DataFrame(data)
df.to_csv(os.path.join(output_dir, '%s_labels_headpose_essemble.csv' %data_type), index=False, header=True)
def CSV_AFLW2000(data_dir):
images = [image[:-4] for image in os.listdir(data_dir) if '.jpg' in image]
df = pd.DataFrame(images)
df.to_csv(os.path.join(data_dir, 'AFLW2000.txt'), header=False, index=False)
def CSV_BIKI(data_dir):
folders = [folder for folder in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, folder))]
images = []
for idx, folder in enumerate(folders):
if folder == 'faces':
continue
folder_path = os.path.join(data_dir, folder)
# Collects frame base names (dropping the 8-character file suffix) for this folder
folder_images = [os.path.join(folder, frame[:-8]) for frame in os.listdir(folder_path) if 'png' in frame]
for image in folder_images:
images.append(image)
df = | pd.DataFrame(images) | pandas.DataFrame |
import numpy as np
from sympy import *
from scipy.integrate import odeint
import pandas as pd
import time
import matplotlib.pyplot as plt
from matplotlib import rcParams
import webbrowser
import random
import copy
import csv
import time
def coeff_vect(mtx):
"""Входные данные: mtx
Выходныее данные: mtx1
Функция выполняет ... действиия
"""
mtx1 = []
for i in range(len(mtx)):
mtx1.append(mtx[i][-1])
return mtx1
def det_my_matrix(mtx):
Lmtx=len(mtx)
if Lmtx==1:
return mtx[0][0]
if Lmtx==2:
return mtx[0][0]*mtx[1][1]-(mtx[0][1]*mtx[1][0])
result=0
for i in range(Lmtx):
factor=1
if i % 2:
factor=-1
mtx2=[]
for row in range(Lmtx):
mtx3=[]
for col in range(Lmtx):
if row!=0 and col!=i:
mtx3.append(mtx[row][col])
if mtx3:
mtx2.append(mtx3)
result+=factor*mtx[0][i]*det_my_matrix(mtx2)
return(result)
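# Added illustration (not part of the original script): det_my_matrix expands the
# determinant along the first row; for a made-up 2x2 matrix the result is easy to check.
def _det_my_matrix_demo():
    return det_my_matrix([[1, 2], [3, 4]])  # 1*4 - 2*3 = -2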
def GJ_method_2(mtx1):
mtx = copy.deepcopy(mtx1)
n = len(mtx)
if det_my_matrix(mtx) == 0:
return 'Вырожденная матрица. Нормально не считается этим методом'
for itr in range(n):
mtx[itr] = [mtx[itr][i] / mtx[itr][itr] for i in range(n + 1)]
for col in range(n):
if col != itr:
mtx[col] = [mtx[col][i] - mtx[itr][i] * mtx[col][itr] for i in range(n + 1)]
return coeff_vect(mtx)
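# Added sketch (hypothetical system, not in the original): GJ_method_2 takes an augmented
# matrix [[a11, a12, b1], [a21, a22, b2]] and returns the solution vector via Gauss-Jordan
# elimination. The system 2x + y = 5, x + 3y = 10 has the solution x = 1, y = 3.
def _gj_method_demo():
    return GJ_method_2([[2, 1, 5], [1, 3, 10]])  # expected: [1.0, 3.0]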
def quadratic_function(X,Y):
sumX4 = sum([X[i] * X[i] * X[i] * X[i] for i in range(len(X))])
sumX3 = sum([X[i] * X[i] * X[i] for i in range(len(X))])
sumX2 = sum([X[i] * X[i] for i in range(len(X))])
sumXY = sum([X[i] * Y[i] for i in range(len(X))])
sumX2Y = sum([X[i] * X[i] * Y[i] for i in range(len(X))])
matrix = [[sumX4, sumX3, sumX2, sumX2Y], [sumX3, sumX2, sum(X), sumXY], [sumX2, sum(X), len(X), sum(Y)]]
GJ_method_abc = GJ_method_2(matrix)
a = GJ_method_abc[0]
b = GJ_method_abc[1]
c = GJ_method_abc[2]
x = Symbol('x')
result = (eval('x')**2)*a + b*eval('x') + c
return(result)
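# Added sketch (made-up data, not in the original): quadratic_function builds the normal
# equations of a least-squares parabola and solves them with GJ_method_2. Points lying
# exactly on y = x**2 + 1 should recover (approximately) that same polynomial.
def _quadratic_fit_demo():
    return quadratic_function([0, 1, 2, 3], [1, 2, 5, 10])  # ~ 1.0*x**2 + 0.0*x + 1.0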
def newton1_function(X,Y):
h=X[1]-X[0]
# Compute the finite differences
y = copy.deepcopy(Y)
deltay = [y[0]]
while len(y) != 1:
y = [y[i]-y[i-1] for i in range(1,len(y))]
deltay.append(y[0])
result=deltay[0]
x = Symbol('x')
deltax = [eval('x')-X[0]]
for i in range(1,len(deltay)-1):
deltax.append(deltax[i-1]*(eval('x') - X[i]))
for i in range(1,len(deltax)+1):
deltay[i] /= h**(i) * factorial(i)
result+=(deltay[i]*deltax[i-1])
return result
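# Added sketch (made-up nodes, not in the original): Newton's forward-difference formula
# on equally spaced samples of y = x**2 + 1 reproduces that polynomial exactly.
def _newton1_demo():
    return expand(newton1_function([0, 1, 2], [1, 2, 5]))  # -> x**2 + 1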
def function(x,y,z,col):
if col == 1:
f = [eval(input("y' = "))]
elif col == 2:
f1 = eval(input("y' = "))
f2 = eval(input("z' = "))
f = [f1,f2]
return f
def diff_my(y1, y0, dx):
y = (y1 - y0) / dx
return y
def iteration():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
system = input('Хотите ли вы ввести систему из двух уравнений? (Да/Нет):')
if system.lower() == 'нет':
func = function(x,y,z,1)
z0 = '-'
try:
x0y0 = list(map(float,input('Введите начальные условия [x0,y0]: ').split()))
x0 = x0y0[0]
y0 = x0y0[1]
except ValueError:
print('Ошибка ввода. Попробуйте ещё раз.')
return iteration()
except IndexError:
print('Введено слишком мало чисел. Попробуйте ещё раз.')
return iteration()
try:
ab = list(map(int,input('Введите желаемый интервал [a,b]: ').split()))
a = ab[0]
b = ab[1]
except ValueError:
print('Ошибка ввода. Попробуйте ещё раз.')
return iteration()
except IndexError:
print('Введено слишком мало чисел. Попробуйте ещё раз.')
return iteration()
if a > b:
print(f'Ошибка в вводе интервала! ({a}!>{b})!!')
return iteration()
elif system.lower() == 'да':
func = function(x,y,z,2)
try:
x0y0 = list(map(float,input('Введите начальные условия [x0,y0,z0]: ').split()))
x0 = x0y0[0]
y0 = x0y0[1]
z0 = x0y0[2]
except ValueError:
print('Ошибка ввода. Попробуйте ещё раз.')
return iteration()
except IndexError:
print('Введено слишком мало чисел. Попробуйте ещё раз.')
return iteration()
try:
ab = list(map(int,input('Введите желаемый интервал [a,b]: ').split()))
a = ab[0]
b = ab[1]
except ValueError:
print('Ошибка ввода. Попробуйте ещё раз.')
return iteration()
except IndexError:
print('Введено слишком мало чисел. Попробуйте ещё раз.')
return iteration()
if a > b:
print(f'Ошибка в вводе интервала! ({a}!>{b})!!')
return iteration()
n = int(input('Введите количество точек (n): '))
return(func,x0,y0,z0,a,b,n)
# Single equation
def iteration_once():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
func = function(x,y,z,1)
try:
x0y0 = list(map(float,input('Введите начальные условия [x0,y0]: ').split()))
x0 = x0y0[0]
y0 = x0y0[1]
except ValueError:
print('Ошибка ввода. Попробуйте ещё раз.')
return iteration_once()
except IndexError:
print('Введено слишком мало чисел. Попробуйте ещё раз.')
return iteration_once()
try:
ab = list(map(int,input('Введите желаемый интервал [a,b]: ').split()))
a = ab[0]
b = ab[1]
except ValueError:
print('Ошибка ввода. Попробуйте ещё раз.')
return iteration_once()
except IndexError:
print('Введено слишком мало чисел. Попробуйте ещё раз.')
return iteration_once()
if a > b:
print(f'Ошибка в вводе интервала! ({a}!>{b})!!')
return iteration_once()
n = int(input('Введите количество точек (n): '))
return(func,x0,y0,a,b,n)
# System of equations
def iteration_system():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
func = function(x,y,z,2)
try:
x0y0 = list(map(float,input('Введите начальные условия [x0,y0,z0]: ').split()))
x0 = x0y0[0]
y0 = x0y0[1]
z0 = x0y0[2]
except ValueError:
print('Ошибка ввода. Попробуйте ещё раз.')
return iteration_system()
except IndexError:
print('Введено слишком мало чисел. Попробуйте ещё раз.')
return iteration_system()
try:
ab = list(map(int,input('Введите желаемый интервал [a,b]: ').split()))
a = ab[0]
b = ab[1]
except ValueError:
print('Ошибка ввода. Попробуйте ещё раз.')
return iteration_system()
except IndexError:
print('Введено слишком мало чисел. Попробуйте ещё раз.')
return iteration_system()
if a > b:
print(f'Ошибка в вводе интервала! ({a}!>{b})!!')
return iteration_system()
n = int(input('Введите количество точек (n): '))
return(func,x0,y0,z0,a,b,n)
def euler(func, x0, y0, z0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
if type(z0) == type(''):
res = [[i, x[i], 0] for i in range(n)]
res[0][2] = y0
for i in range(1,n):
res[i][0] = i
res[i][1] = x[i]
res[i][2] = res[i-1][2] + (h*func[0].subs([(X,res[i-1][1]),(Y,res[i-1][2])]))
else:
res = [[i, x[i], 0, 0] for i in range(n)]
res[0][2] = y0
res[0][3] = z0
for i in range(1,n):
res[i][0] = i
res[i][1] = x[i]
res[i][2] = res[i-1][2] + (h*func[0].subs([(X,res[i-1][1]),(Y,res[i-1][2]),(Z,res[i-1][3])]))
res[i][3] = res[i-1][3] + (h*func[1].subs([(X,res[i-1][1]),(Y,res[i-1][2]),(Z,res[i-1][3])]))
return res
# Single equation
def euler_once(func, x0, y0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
res = [[i, x[i], 0] for i in range(n)]
res[0][2] = y0
for i in range(1,n):
res[i][0] = i
res[i][1] = x[i]
res[i][2] = res[i-1][2] + (h*func[0].subs([(X,res[i-1][1]),(Y,res[i-1][2])]))
return res
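# Added sketch (hypothetical ODE, not in the original): euler_once integrates y' = f(x, y)
# given as a sympy expression. For y' = y with y(0) = 1 on [0, 1] and 10 steps, each row of
# the result is [i, x_i, y_i]; the last y-value, 1.1**9 ~ 2.36, approximates e**0.9 ~ 2.46.
def _euler_once_demo():
    return euler_once([Symbol('y')], 0, 1, 0, 1, 10)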
# System of equations
def euler_system(func, x0, y0, z0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
res = [[i, x[i], 0, 0] for i in range(n)]
res[0][2] = y0
res[0][3] = z0
for i in range(1,n):
res[i][0] = i
res[i][1] = x[i]
res[i][2] = res[i-1][2] + (h*func[0].subs([(X,res[i-1][1]),(Y,res[i-1][2]),(Z,res[i-1][3])]))
res[i][3] = res[i-1][3] + (h*func[1].subs([(X,res[i-1][1]),(Y,res[i-1][2]),(Z,res[i-1][3])]))
return res
def eulercauchy(func, x0, y0, z0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
if type(z0) == type(''):
_y = [0]*n
_y[0] = y0
for i in range(1,n):
_y[i] = _y[i-1] + (h*func[0].subs([(X,x[i-1]),(Y,_y[i-1])]))
res = [[i, x[i], 0] for i in range(n)]
res[0][2] = y0
for i in range(1,n):
res[i][2] = res[i-1][2] + (h/2) * ((func[0].subs([(X,x[i-1]),(Y,res[i-1][2])]))+(func[0].subs([(X,x[i]),(Y,_y[i])])))
else:
_y = [0]*n
_z = [0]*n
_y[0] = y0
_z[0] = z0
for i in range(1,n):
_y[i] = _y[i-1] + (h*func[0].subs([(X,x[i-1]),(Y,_y[i-1]),(Z,_z[i-1])]))
_z[i] = _z[i-1] + (h*func[1].subs([(X,x[i-1]),(Y,_y[i-1]),(Z,_z[i-1])]))
res = [[i, x[i], 0, 0] for i in range(n)]
res[0][2] = y0
res[0][3] = z0
for i in range(1,n):
res[i][2] = res[i-1][2] + (h/2) * ((func[0].subs([(X,x[i-1]),(Y,res[i-1][2]),(Z,res[i-1][3])]))+(func[0].subs([(X,x[i]),(Y,_y[i]),(Z,_z[i])])))
res[i][3] = res[i-1][3] + (h/2) * ((func[1].subs([(X,x[i-1]),(Y,res[i-1][2]),(Z,res[i-1][3])]))+(func[1].subs([(X,x[i]),(Y,_y[i]),(Z,_z[i])])))
return res
# Single equation
def eulercauchy_once(func, x0, y0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
_y = [0]*n
_y[0] = y0
for i in range(1,n):
_y[i] = _y[i-1] + (h*func[0].subs([(X,x[i-1]),(Y,_y[i-1])]))
res = [[i, x[i], 0] for i in range(n)]
res[0][2] = y0
for i in range(1,n):
res[i][2] = res[i-1][2] + (h/2) * ((func[0].subs([(X,x[i-1]),(Y,res[i-1][2])]))+(func[0].subs([(X,x[i]),(Y,_y[i])])))
return res
# System of equations
def eulercauchy_system(func, x0, y0, z0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
_y = [0]*n
_z = [0]*n
_y[0] = y0
_z[0] = z0
for i in range(1,n):
_y[i] = _y[i-1] + (h*func[0].subs([(X,x[i-1]),(Y,_y[i-1]),(Z,_z[i-1])]))
_z[i] = _z[i-1] + (h*func[1].subs([(X,x[i-1]),(Y,_y[i-1]),(Z,_z[i-1])]))
res = [[i, x[i], 0, 0] for i in range(n)]
res[0][2] = y0
res[0][3] = z0
for i in range(1,n):
res[i][2] = res[i-1][2] + (h/2) * ((func[0].subs([(X,x[i-1]),(Y,res[i-1][2]),(Z,res[i-1][3])]))+(func[0].subs([(X,x[i]),(Y,_y[i]),(Z,_z[i])])))
res[i][3] = res[i-1][3] + (h/2) * ((func[1].subs([(X,x[i-1]),(Y,res[i-1][2]),(Z,res[i-1][3])]))+(func[1].subs([(X,x[i]),(Y,_y[i]),(Z,_z[i])])))
return res
def rungekutta(func, x0, y0, z0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
if type(z0) == type(''):
res = [[i, x[i], 0] for i in range(n)]
res[0][2] = y0
for i in range(1,n):
k1 = h * func[0].subs([(X,res[i-1][1]),(Y,res[i-1][2])])
k2 = h * func[0].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k1/2)])
k3 = h * func[0].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k2/2)])
k4 = h * func[0].subs([(X,(res[i-1][1])+h),(Y,(res[i-1][2])+k3)])
res[i][2] = res[i-1][2] + 1/6 * (k1 + 2*k2 + 2*k3 + k4)
else:
res = [[i, x[i], 0, 0] for i in range(n)]
res[0][2] = y0
res[0][3] = z0
for i in range(1,n):
k1y = h * func[0].subs([(X,res[i-1][1]),(Y,res[i-1][2]), (Z,res[i-1][3])])
k2y = h * func[0].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k1y/2), (Z,(res[i-1][3])+k1y/2)])
k3y = h * func[0].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k2y/2), (Z,(res[i-1][3])+k2y/2)])
k4y = h * func[0].subs([(X,(res[i-1][1])+h),(Y,(res[i-1][2])+k3y),(Z,(res[i-1][3])+k3y)])
res[i][2] = res[i-1][2] + 1/6 * (k1y + 2*k2y + 2*k3y + k4y)
k1z = h * func[1].subs([(X,res[i-1][1]),(Y,res[i-1][2]), (Z,res[i-1][3])])
k2z = h * func[1].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k1z/2), (Z,(res[i-1][3])+k1z/2)])
k3z = h * func[1].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k2z/2), (Z,(res[i-1][3])+k2z/2)])
k4z = h * func[1].subs([(X,(res[i-1][1])+h),(Y,(res[i-1][2])+k3z),(Z,(res[i-1][3])+k3z)])
res[i][3] = res[i-1][3] + 1/6 * (k1z + 2*k2z + 2*k3z + k4z)
return res
def rungekutta_once(func, x0, y0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
res = [[i, x[i], 0] for i in range(n)]
res[0][2] = y0
for i in range(1,n):
k1 = h * func[0].subs([(X,res[i-1][1]),(Y,res[i-1][2])])
k2 = h * func[0].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k1/2)])
k3 = h * func[0].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k2/2)])
k4 = h * func[0].subs([(X,(res[i-1][1])+h),(Y,(res[i-1][2])+k3)])
res[i][2] = res[i-1][2] + 1/6 * (k1 + 2*k2 + 2*k3 + k4)
return res
def rungekutta_system(func, x0, y0, z0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
res = [[i, x[i], 0, 0] for i in range(n)]
res[0][2] = y0
res[0][3] = z0
for i in range(1,n):
k1y = h * func[0].subs([(X,res[i-1][1]),(Y,res[i-1][2]), (Z,res[i-1][3])])
k2y = h * func[0].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k1y/2), (Z,(res[i-1][3])+k1y/2)])
k3y = h * func[0].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k2y/2), (Z,(res[i-1][3])+k2y/2)])
k4y = h * func[0].subs([(X,(res[i-1][1])+h),(Y,(res[i-1][2])+k3y),(Z,(res[i-1][3])+k3y)])
res[i][2] = res[i-1][2] + 1/6 * (k1y + 2*k2y + 2*k3y + k4y)
k1z = h * func[1].subs([(X,res[i-1][1]),(Y,res[i-1][2]), (Z,res[i-1][3])])
k2z = h * func[1].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k1z/2), (Z,(res[i-1][3])+k1z/2)])
k3z = h * func[1].subs([(X,(res[i-1][1])+h/2),(Y,(res[i-1][2])+k2z/2), (Z,(res[i-1][3])+k2z/2)])
k4z = h * func[1].subs([(X,(res[i-1][1])+h),(Y,(res[i-1][2])+k3z),(Z,(res[i-1][3])+k3z)])
res[i][3] = res[i-1][3] + 1/6 * (k1z + 2*k2z + 2*k3z + k4z)
return res
def odeint_scp(func, x0, y0, a, b, n):
h = (b-a)/n
x = np.arange(x0,x0+(b-a),h)
X = Symbol('x')
Y = Symbol('y')
Z = Symbol('z')
f = lambdify([X,Y],func[0])
res = [[i, x[i], 0] for i in range(n)]
y = odeint(f,y0,np.array(x))
for i in range(n):
res[i][2] = float(y[i])
return res
#func, x0, y0, z0, a, b, n = iteration()
#print(func)
#print(x0)
#print(y0)
#print(z0)
#print(a)
#print(b)
#print(n)
# In[11]:
#euler(func, x0, y0, z0, a, b, n)
# In[12]:
#eulercauchy(func, x0, y0, z0, a, b, n)
# In[13]:
#rungekutta(func, x0, y0, z0, a, b, n)
# #### Input: single equation
# In[27]:
#func, x0, y0, a, b, n = iteration_once()
#print(func)
#print(x0)
#print(y0)
#print(a)
#print(b)
#print(n)
# In[28]:
#euler_once(func, x0, y0, a, b, n)
# In[29]:
#eulercauchy_once(func, x0, y0, a, b, n)
# In[30]:
#rungekutta_once(func, x0, y0, a, b, n)
# In[31]:
##odeint_scp(func, x0, y0, a, b, n)
# #### Input: system of equations
# #### Input: single equation
# In[12]:
def obertka():
"""
This function wraps the code below to avoid clashes with module-level constants and so that the module can be imported as a library.
"""
func, x0, y0, a, b, n = iteration_once()
t0 = time.time()
result_euler = euler_once(func, x0, y0, a, b, n)
t1_euler = time.time()
t1_euler -= t0
t0 = time.time()
result_eulercauchy = eulercauchy_once(func, x0, y0, a, b, n)
t1_eulercauchy = time.time()
t1_eulercauchy -= t0
t0 = time.time()
result_rungekutta = rungekutta_once(func, x0, y0, a, b, n)
t1_rungekutta = time.time()
t1_rungekutta -= t0
t0 = time.time()
result_odeint_scp = odeint_scp(func, x0, y0, a, b, n)
t1_odeint_scp = time.time()
t1_odeint_scp -= t0
xd = [result_euler[i][1] for i in range(n)]
yd_euler = [result_euler[i][2] for i in range(n)]
yd_eulercauchy = [result_eulercauchy[i][2] for i in range(n)]
yd_rungekutta = [result_rungekutta[i][2] for i in range(n)]
yd_odeint_scp = [result_odeint_scp[i][2] for i in range(n)]
# Euler
df_euler = pd.DataFrame({'xi':xd,'yi':yd_euler})
print('--------------------------------------------------------------')
print('Решение ОДУ методом Эйлера')
print(df_euler)
# Euler-Cauchy
df_eulercauchy = pd.DataFrame({'xi':xd,'yi':yd_eulercauchy})
print('--------------------------------------------------------------')
print('Решение ОДУ методом Эйлера-Коши')
print(df_eulercauchy)
# Runge-Kutta
df_rungekutta = pd.DataFrame({'xi':xd,'yi':yd_rungekutta})
print('--------------------------------------------------------------')
print('Решение ОДУ методом Рунге-Кутты')
print(df_rungekutta)
# scipy.integrate odeint
df_odeint_scp = pd.DataFrame({'xi':xd,'yi':yd_odeint_scp})
print('--------------------------------------------------------------')
print('Решение ОДУ scipy.integrate odeint')
print(df_odeint_scp)
dxa = xd[1] - xd[0]
ymydiff = []
for i in range(len(yd_odeint_scp) - 1):
ansi = diff_my(yd_odeint_scp[i+1],yd_odeint_scp[i],dxa)
ymydiff.append(ansi)
df_ymydiff = pd.DataFrame({'xi':xd,'deltai':yd_odeint_scp})
print('--------------------------------------------------------------')
print('Разность между yi и y' + "'" + 'i')
print(df_ymydiff)
deltai = [(yd_odeint_scp[i] - ymydiff[i]) for i in range(len(ymydiff))]
sumx = sum([abs(t) for t in deltai])
rcParams['figure.figsize'] = (30, 20)
rcParams['figure.dpi'] = 300
fig = plt.figure()
fig,ax = plt.subplots()
plt.tick_params(labelsize = 40)
plt.grid(b=True, color='DarkTurquoise', alpha=0.75, linestyle=':', linewidth=1)
ax.plot(xd[:-1],deltai,color='blue',lw=2,label='Y - Y' + "'")
ax.plot(xd[:-1],deltai,color='blue',lw=0,label=f'Сумма отклонений = {sumx}')
ax.legend(loc='lower right',title='Легенда',title_fontsize=25,fontsize=20)
plt.show()
# In[36]:
rcParams['figure.figsize'] = (30, 20)
rcParams['figure.dpi'] = 300
fig,ax=plt.subplots()
plt.tick_params(labelsize = 40)
plt.grid(b=True, color='DarkTurquoise', alpha=0.75, linestyle=':', linewidth=1)
ax.plot(xd,yd_euler,color='blue',lw=2,label='Решение ОДУ методом Эйлера')
ax.plot(xd,yd_eulercauchy,color='red',lw=2,label='Решение ОДУ методом Эйлера-Коши')
ax.plot(xd,yd_rungekutta,color='green',lw=2,label='Решение ОДУ методом Рунге-Кутты')
ax.plot(xd,yd_odeint_scp,color='black',lw=2,label='Решение ОДУ scipy.integrate odeint')
ax.set_title('Решение ОДУ')
ax.legend(loc='lower right',title='Легенда',title_fontsize=25,fontsize=15)
plt.xlabel('X',fontsize=25)
plt.ylabel('Y',fontsize=25)
print(f'Аппроксимация МНК для метода Эйлера (XY): \n {quadratic_function(xd,yd_euler)}')
print(f'Интерполяция через первую формулу Ньютона для метода Эйлера (XY): \n {expand(newton1_function(xd,yd_euler))}')
print('-----------------------------------------------------------------------------------')
print(f'Аппроксимация МНК для метода Эйлера-Коши (XY): \n {quadratic_function(xd,yd_eulercauchy)}')
print(f'Интерполяция через первую формулу Ньютона для метода Эйлера-Коши (XY): \n {expand(newton1_function(xd,yd_eulercauchy))}')
print('-----------------------------------------------------------------------------------')
print(f'Аппроксимация МНК для метода Рунге-Кутты (XY): \n {quadratic_function(xd,yd_rungekutta)}')
print(f'Интерполяция через первую формулу Ньютона для метода Рунге-Кутты (XY): \n {expand(newton1_function(xd,yd_rungekutta))}')
print('-----------------------------------------------------------------------------------')
print(f'Аппроксимация МНК для метода Рунге-Кутты (XY): \n {quadratic_function(xd,yd_odeint_scp)}')
print(f'Интерполяция через первую формулу Ньютона для метода Рунге-Кутты (XY): \n {expand(newton1_function(xd,yd_odeint_scp))}')
t=[]
print(f'Время работы метода Эйлера: {t1_euler}.')
t.append(t1_euler)
print(f'Время работы метода Эйлера-Коши: {t1_eulercauchy}.')
t.append(t1_eulercauchy)
print(f'Время работы метода Рунге-Кутты: {t1_rungekutta}.')
t.append(t1_rungekutta)
print(f'Время работы метода из библиотеки: {t1_odeint_scp}.')
t.append(t1_odeint_scp)
# In[14]:
rcParams['figure.figsize'] = (30, 20)
rcParams['figure.dpi'] = 300
fig,ax=plt.subplots()
plt.tick_params(labelsize = 40)
plt.grid(b=True, color='DarkTurquoise', alpha=0.75, linestyle=':', linewidth=1)
plt.bar(0, t[0], color = 'purple', label = 'Время работы метода Эйлера')
plt.bar(1, t[1], color = 'yellow', label = 'Время работы метода Эйлера-Коши')
plt.bar(2, t[2], color = 'lime', label = 'Время работы метода Рунге-Кутты')
plt.bar(3, t[3], color = 'red', label = 'Время работы метода из библиотеки')
ax.set_title('Анализ времени работы программ', loc = 'center')
ax.legend(loc='lower right',title='Легенда',title_fontsize=25,fontsize=15)
plt.ylabel('Время (cек.)',fontsize=25)
plt.show()
# #### Input: system of equations
# In[15]:
func, x0, y0, z0, a, b, n = iteration_system()
t0 = time.time()
result_euler = euler_system(func, x0, y0, z0, a, b, n)
t1_euler = time.time()
t1_euler -= t0
t0 = time.time()
result_eulercauchy = eulercauchy_system(func, x0, y0, z0, a, b, n)
t1_eulercauchy = time.time()
t1_eulercauchy -= t0
t0 = time.time()
result_rungekutta = rungekutta_system(func, x0, y0, z0, a, b, n)
t1_rungekutta = time.time()
t1_rungekutta -= t0
xd = [result_euler[i][1] for i in range(n)]
yd_euler = [result_euler[i][2] for i in range(n)]
yd_eulercauchy = [result_eulercauchy[i][2] for i in range(n)]
yd_rungekutta = [result_rungekutta[i][2] for i in range(n)]
# Euler
zd_euler = [result_euler[i][3] for i in range(n)]
df_euler = | pd.DataFrame({'xi':xd,'yi':yd_euler,'zi':zd_euler}) | pandas.DataFrame |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
weights1 = pd.DataFrame(vals, index=widx, columns=cols)
widx = pd.MultiIndex.from_tuples([( | TS('2015-01-03') | pandas.Timestamp |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import requests
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
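# Added illustration (hypothetical input, not in the original): Convert pairs up
# consecutive elements of a flat list into a dictionary.
def _convert_demo():
    return Convert(['accuracy', 0.91, 'recall', 0.87])  # -> {'accuracy': 0.91, 'recall': 0.87}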
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Sent data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
            if (key.find('*') != -1):
                target = key
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Send the pre-computed data space projections to the frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
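# Example (illustrative): for a dataframe with columns ['a', 'b', 'c'],
# column_index(df, ['c', 'a']) returns [2, 0].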
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
            zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)
)
return out
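# Example (illustrative; assumes a fitted tree-based model and the globals XData/yData):
# rf = RandomForestClassifier(random_state=RANDOM_SEED).fit(XData, yData)
# per_class = class_feature_importance(XData.values, np.array(yData), rf.feature_importances_)
# per_class[c][j] is then the importance-weighted mean of scaled feature j for class c.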
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
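# Example request body (illustrative): {"defaultModeMain": "blend"} selects a single
# 80/20 shuffle split; any other value falls back to 5-fold cross-validation.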
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
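# GridSearchForModels below is memoized on disk by joblib: repeated calls with identical
# arguments are served from './cachedir'. To force recomputation (illustrative):
# memory.clear(warn=False)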
# calculating for all algorithms and models the performance and other results
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
    # gather the results and send them back
    results = []
    results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
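# Example (illustrative): Remove([1.0, 1.0, float('nan'), 'a', 'a']) returns [1.0, 'a'],
# dropping duplicates and NaN values while preserving order.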
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
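# Example request body (illustrative): {"algorithms": ["KNN", "RF"], "models": [3, 620]}
# maps each brushed model ID to its algorithm-specific list (KNNModels, RFModels, ...).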
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
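# The column-wise loop above soft-votes the per-model class probabilities, e.g. (illustrative):
# two models predicting [0.2, 0.8] and [0.4, 0.6] for one instance average to [0.3, 0.7].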
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
    mtx1Pred, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatParams = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatParams
def preProcessFeatAcc():
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = | pd.DataFrame.from_dict(dicSVC) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold, KFold, ParameterGrid
import xgboost
from sklearn.metrics import mean_squared_error
CLASS = False # Whether classification or regression
SCORE_MIN = True # Optimizing score through minimum
k = 5 # Number of folds
best_score = 10
best_params = None
best_iter = None
train_name = 'train.csv'
test_name = 'test.csv'
submission_name = 'sample_submission.csv'
submission_col = 'SalePrice'
submission_target = 'Vayne8.csv'
# Read files
train = pd.read_csv(train_name, index_col=0)
train = train.fillna(-1)
test = pd.read_csv(test_name, index_col=0)
test = test.fillna(-1)
submission = pd.read_csv(submission_name, index_col=0)
# Extract target
target = train['SalePrice']
del train['SalePrice']
# Label nominal variables to numbers
columns = train.columns.values
nom_numeric_cols = ['MSSubClass']
dummy_train = []
dummy_test = []
for col in columns:
# Only works for nominal data without a lot of factors
if train[col].dtype.name == 'object' or col in nom_numeric_cols:
dummy_train.append(pd.get_dummies(train[col].values.astype(str), col))
dummy_train[-1].index = train.index
dummy_test.append(pd.get_dummies(test[col].values.astype(str), col))
dummy_test[-1].index = test.index
del train[col]
del test[col]
train = | pd.concat([train] + dummy_train, axis=1) | pandas.concat |
import os
import pandas as pd
import numpy as np
import scipy
import openpyxl
from openpyxl import Workbook
import scipy.stats as stats
import file_functions
def sankey_chi_squared(detrended_dem, zs):
"""This function calculates a chi squared test comparing observed landform transitions vs expected, with expected
frequencies proportional to relative abundance. Low p values indicate significant transition preferences.
Returns: two dataframes, one containing transitions %, expected %, and p value for both base->bf and bf->vf"""
col_labels = ['base', 'bf', 'vf']
code_dict = {-2: 'O', -1: 'CP', 0: 'NC', 1: 'WB', 2: 'NZ'} # code number and corresponding MU
landforms = ['Oversized', 'Const. Pool', 'Normal', 'Wide bar', 'Nozzle']
outs = []
if detrended_dem == '':
print('Error: Must input detrended DEM parameter in the GUI to set up output folder location')
return
if type(zs) == str:
zs = file_functions.string_to_list(zs, format='float')
elif type(zs) != list:
print(
'Error: Key flow stage parameter input incorrectly. Please enter stage heights separated only by commas (i.e. 0.2,0.7,3.6)')
# set up directories
dem_dir = os.path.dirname(detrended_dem)
gcs_dir = dem_dir + '\\gcs_tables'
out_dir = dem_dir + '\\nesting_analysis'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# get units for labeling
u = file_functions.get_label_units(detrended_dem)[0]
# prep input data
zs.sort()
z_labels = [file_functions.float_keyz_format(z) + u for z in zs]
aligned_df = pd.read_csv(gcs_dir + '\\aligned_gcs_table.csv')
data = aligned_df.dropna()
out_dict = {'from': [], 'to': [], 'to_landform': [], 'expected_freq': [], 'expected_proportion': []}
# for each step-wise stage transition, calculate chi-squared test result
for i in range(len(zs) - 1):
print('Chi Squares test for landform transitions: %s -> %s' % (z_labels[i], z_labels[i + 1]))
type_df = data.dropna(axis=0, subset=['code_%s' % z_labels[i], 'code_%s' % z_labels[i + 1]])
total_rows = int(type_df.shape[0])
lower = z_labels[i]
higher = z_labels[i + 1]
for num in range(-2, 3):
out_dict['from'].append(lower)
out_dict['to'].append(higher)
out_dict['to_landform'].append(landforms[num + 2])
            num_df = type_df.loc[lambda type_df: type_df['code_%s' % z_labels[i + 1]] == num]
out_dict['expected_freq'].append(num_df.shape[0])
out_dict['expected_proportion'].append(num_df.shape[0] / total_rows)
for j, form in enumerate(landforms):
if i == 0:
out_dict['from_' + form + '_freq'] = []
out_dict['from_' + form + '_proportion'] = []
out_dict['p_value_from_%s' % form] = []
low_index = j - 2
low_code = 'code_%s' % z_labels[i]
form_df = type_df.loc[lambda type_df: type_df[low_code] == low_index]
form_rows_count = form_df.shape[0]
for z, high in enumerate(landforms):
high_index = z - 2
high_code = 'code_%s' % z_labels[i + 1]
sub_df = form_df.loc[lambda form_df: form_df[high_code] == high_index]
freq = sub_df.shape[0]
out_dict['from_' + form + '_freq'].append(freq)
out_dict['from_' + form + '_proportion'].append(freq / form_rows_count)
obs = np.array(out_dict['from_' + form + '_freq'])
            expect = np.array(out_dict['expected_freq'])
test_out = stats.chisquare(obs, expect)
out_dict['p_value_from_%s' % form].extend([test_out[1] for i in range(5)]) # Add p values
out_df = | pd.DataFrame.from_dict(out_dict) | pandas.DataFrame.from_dict |
import datetime
import warnings
from copy import copy
from types import MappingProxyType
from typing import Sequence, Callable, Mapping, Union, TypeVar, TYPE_CHECKING
import numpy as np
import pandas as pd
import sidekick as sk
from .clinical_acessor import Clinical
from .metaclass import ModelMeta
from .. import fitting as fit
from .. import formulas
from ..diseases import Disease, DiseaseParams, disease as get_disease
from ..logging import log
from ..mixins import (
Meta,
WithParamsMixin,
WithDataModelMixin,
WithInfoMixin,
WithResultsMixin,
WithRegionDemography,
)
from ..packages import plt
from ..utils import today, not_implemented, extract_keys, param_property
T = TypeVar("T")
NOW = datetime.datetime.now()
TODAY = datetime.date(NOW.year, NOW.month, NOW.day)
DAY = datetime.timedelta(days=1)
pplt = sk.import_later("..plot", package=__package__)
if TYPE_CHECKING:
from ..model_group import ModelGroup
from pydemic_ui.model import UIProperty
class Model(
WithDataModelMixin,
WithInfoMixin,
WithResultsMixin,
WithParamsMixin,
WithRegionDemography,
metaclass=ModelMeta,
):
"""
Base class for all models.
"""
meta: Meta
class Meta:
model_name = "Model"
data_aliases = {}
# Initial values
state: np.ndarray = None
initial_cases: float = sk.lazy(lambda self: self._initial_cases())
initial_infected: float = sk.lazy(lambda self: self._initial_infected())
# Initial time
date: datetime.date = None
time: float = 0.0
iter: int = sk.property(lambda m: len(m.data))
dates: pd.DatetimeIndex = sk.property(lambda m: m.to_dates(m.times))
times: pd.Index = sk.property(lambda m: m.data.index)
# Common epidemiological parameters
R0: float = param_property("R0", default=2.0)
K = sk.property(not_implemented)
duplication_time = property(lambda self: np.log(2) / self.K)
# Special accessors
clinical: Clinical = property(lambda self: Clinical(self))
clinical_model: type = None
clinical_params: Mapping = MappingProxyType({})
disease: Disease = None
disease_params: DiseaseParams = None
@property
def ui(self) -> "UIProperty":
try:
from pydemic_ui.model import UIProperty
except ImportError as ex:
log.warn(f"Could not import pydemic_ui.model: {ex}")
msg = (
"must have pydemic-ui installed to access the model.ui attribute.\n"
"Please 'pip install pydemic-ui' before proceeding'"
)
raise RuntimeError(msg)
return UIProperty(self)
def __init__(
self, params=None, *, run=None, name=None, date=None, clinical=None, disease=None, **kwargs
):
self.name = name or f"{type(self).__name__} model"
self.date = pd.to_datetime(date or today())
self.disease = get_disease(disease)
self._initialized = False
# Fix demography
demography_opts = WithRegionDemography._init_from_dict(self, kwargs)
self.disease_params = self.disease.params(**demography_opts)
# Init other mixins
WithParamsMixin.__init__(self, params, keywords=kwargs)
WithInfoMixin.__init__(self)
WithResultsMixin.__init__(self)
WithDataModelMixin.__init__(self)
if clinical:
clinical = dict(clinical)
self.clinical_model = clinical.pop("model", None)
self.clinical_params = clinical
for k, v in kwargs.items():
if hasattr(self, k):
try:
setattr(self, k, v)
except AttributeError:
name = type(self).__name__
msg = f"cannot set '{k}' attribute in '{name}' model"
raise AttributeError(msg)
else:
raise TypeError(f"invalid arguments: {k}")
if run is not None:
self.run(run)
def __str__(self):
return self.name
def _initial_cases(self):
raise NotImplementedError("must be implemented in subclass")
def _initial_infected(self):
raise NotImplementedError("must be implemented in subclass")
def epidemic_model_name(self):
"""
Return the epidemic model name.
"""
return self.meta.model_name
#
# Pickling and copying
#
# noinspection PyUnresolvedReferences
def copy(self, **kwargs):
"""
Copy instance possibly setting new values for attributes.
Keyword Args:
All keyword arguments are used to reset attributes in the copy.
Examples:
>>> m.copy(R0=1.0, name="Stable")
<SIR(name="Stable")>
"""
cls = type(self)
data = self.__dict__.copy()
params = data.pop("_params")
data.pop("_results_cache")
new = object.__new__(cls)
for k in list(kwargs):
if k in data:
data[k] = kwargs.pop(k)
new._params = copy(params)
new._results_cache = {}
new.__dict__.update(copy(data))
for k, v in kwargs.items():
setattr(new, k, v)
return new
def split(self, n=None, **kwargs) -> "ModelGroup":
"""
Create n copies of model, each one may override a different set of
parameters and return a ModelGroup.
Args:
n:
Number of copies in the resulting list. It can also be a sequence
of dictionaries with arguments to pass to the .copy() constructor.
Keyword Args:
Keyword arguments are passed to the `.copy()` method of the model. If
the keyword is a sequence, it applies the n-th component of the sequence
to the corresponding n-th model.
"""
from ..model_group import ModelGroup
if n is None:
for k, v in kwargs.items():
if not isinstance(v, str) and isinstance(v, Sequence):
n = len(v)
break
else:
raise TypeError("cannot determine the group size from arguments")
if isinstance(n, int):
options = [{} for _ in range(n)]
else:
options = [dict(d) for d in n]
n: int = len(options)
# Merge option dicts
for k, v in kwargs.items():
if not isinstance(v, str) and isinstance(v, Sequence):
xs = v
m = len(xs)
if m != n:
raise ValueError(
f"sizes do not match: "
f"{k} should be a sequence of {n} "
f"items, got {m}"
)
for opt, x in zip(options, xs):
opt.setdefault(k, x)
else:
for opt in options:
opt.setdefault(k, v)
# Fix name
for opt in options:
try:
name = opt["name"]
except KeyError:
pass
else:
opt["name"] = name.format(n=n, **opt)
return ModelGroup(self.copy(**opt) for opt in options)
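    # Example (illustrative): model.split(2, R0=[1.5, 2.5]) returns a ModelGroup with two
    # copies of the model, one per R0 value.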
def split_children(self, options=MappingProxyType({}), **kwargs) -> "ModelGroup":
"""
Similar to split, but split into the children of the given class.
Args:
options:
A mapping between region or region id
"""
from ..model_group import ModelGroup
if self.region is None:
raise ValueError("model is not bound to a region")
for k in self._params:
if k not in kwargs:
kwargs[k] = self.get_param(k)
for attr in ("disease",):
kwargs.setdefault(attr, getattr(self, attr))
return ModelGroup.from_children(self.region, type(self), options, **kwargs)
def reset(self, date: Union[datetime.date, float] = None, **kwargs):
"""
Return a copy of the model setting the state to the final state. If a
positional "date" argument is given, reset to the state to the one in the
specified date.
Args:
date (float or date):
An optional float or datetime selecting the desired date.
Keyword Args:
Additional keyword arguments are handled the same way as the
:method:`copy` method.
"""
if date is None:
date = self.date
time = self.time
elif isinstance(date, (float, int)):
time = float(date)
date = self.to_date(date)
else:
time: float = self.to_time(date)
kwargs["data"] = self.data.loc[[time]]
kwargs["date"] = date
kwargs["state"] = kwargs["data"].iloc[0].values
kwargs["time"] = 1
return self.copy(**kwargs)
def trim_dates(self, start=0, end=None):
"""
Trim data in model to the given interval specified by start and end
dates or times.
Args:
start (int or date):
Starting date. If not given, start at zero.
end (int or date):
End date. If not given, select up to the final date.
"""
start = int(start or 0)
end = int(end or self.time)
new = self.copy(
date=self.to_date(start),
data=self.data.iloc[start:end].reset_index(drop=True),
time=end - start,
state=self.data.iloc[end].values,
)
return new
#
# Initial conditions
#
def set_ic(self, state=None, **kwargs):
"""
Set initial conditions.
"""
if self.state is None:
if state is None:
state = self.initial_state(**kwargs)
self.state = np.array(state, dtype=float)
alias = self.meta.data_aliases
for k, v in list(kwargs.items()):
if k in alias:
del kwargs[k]
kwargs[alias[k]] = v
components = extract_keys(self.meta.variables, kwargs)
for k, v in components.items():
idx = self.meta.get_variable_index(k)
self.state[idx] = v
return self
def set_data(self, data):
"""
Force a dataframe into simulation state.
"""
data = data.copy()
data.columns = [self.meta.data_aliases.get(c, c) for c in data.columns]
self.set_ic(state=data.iloc[0])
self.data = data.reset_index(drop=True)
self.time = len(data) - 1
self.date = data.index[-1]
self.state[:] = data.iloc[-1]
self.info["observed.dates"] = data.index[[0, -1]]
self._initialized = True
return self
def set_cases_from_region(self: T) -> T:
"""
Set the number of cases from region.
"""
self.set_cases()
return self
def set_cases(self: T, curves=None, adjust_R0=False, save_observed=False) -> T:
"""
Initialize model from a dataframe with the deaths and cases curve.
This curve is usually the output of disease.epidemic_curve(region), and is
automatically retrieved if not passed explicitly and the region of the model
is set.
Args:
curves:
Dataframe with cumulative ["cases", "deaths"] columns. If not given,
or None, fetches from disease.epidemic_curves(info)
adjust_R0:
If true, adjust R0 from the observed cases.
save_observed:
If true, save the cases curves into the model.info["observed.cases"] key.
"""
if curves is None:
warnings.warn("omitting curves from set_cases will be deprecated.")
if self.region is None or self.disease is None:
msg = 'must provide both "region" and "disease" or an explicit cases ' "curve."
raise ValueError(msg)
curves = self.region.pydemic.epidemic_curve(self.disease)
if adjust_R0:
warnings.warn("adjust_R0 argument is deprecated")
method = "RollingOLS" if adjust_R0 is True else adjust_R0
Re, _ = value = fit.estimate_R0(self, curves, Re=True, method=method)
assert np.isfinite(Re), f"invalid value for R0: {value}"
self.R0 = Re
# Save notification it in the info dictionary for reference
if "cases_observed" in curves:
tf = curves.index[-1]
rate = curves.loc[tf, "cases_observed"] / curves.loc[tf, "cases"]
else:
rate = 1.0
self.info["observed.notification_rate"] = rate
# Save simulation state from data
model = self.epidemic_model_name()
curve = fit.cases(curves)
data = fit.epidemic_curve(model, curve, self)
self.set_data(data)
self.initial_cases = curve.iloc[0]
if adjust_R0:
self.R0 /= self["susceptible:final"] / self.population
self.info["observed.R0"] = self.R0
# Optionally save cases curves into the info dictionary
if save_observed:
key = "observed.curves" if save_observed is True else save_observed
df = curves.rename(columns={"cases": "cases_raw"})
df["cases"] = curve
self.info[key] = df
return self
def adjust_R0(self, method="RollingOLS"):
curves = self["cases"]
self.R0, _ = fit.estimate_R0(self, curves, method=method)
self.info["observed.R0"] = self.R0
def initial_state(self, cases=None, **kwargs):
"""
Create the default initial vector for model.
"""
if cases is not None:
kwargs.setdefault("population", self.population)
return formulas.initial_state(self.epidemic_model_name(), cases, self, **kwargs)
return self._initial_state()
def infect(self, n=1, column="infectious"):
"""
Convert 'n' susceptible individuals to infectious.
"""
last = self.data.index[-1]
n = min(n, self.data.loc[last, "susceptible"])
self.data.loc[last, column] += n
self.data.loc[last, "susceptible"] -= n
return self
def _initial_state(self):
raise NotImplementedError
def initialize(self):
"""
Force initialization.
"""
if not self._initialized:
self.set_ic()
self.data = make_dataframe(self)
self._initialized = True
#
# Running simulation
#
def run(self: T, time) -> T:
"""
Runs the model for the given duration.
"""
steps = int(time)
self.initialize()
        if time == 0:
            return self
_, *shape = self.data.shape
ts = self.time + 1.0 + np.arange(steps)
data = np.zeros((steps, *shape))
date = self.date
if self.info.get("event.simulation_start") is None:
self.info.save_event("simulation_start")
self.run_to_fill(data, ts)
extra = pd.DataFrame(data, columns=self.data.columns, index=ts)
self.data = | pd.concat([self.data, extra]) | pandas.concat |
from __future__ import print_function
import collections
import json
import logging
import os
import pickle
import random
import sys
import numpy as np
import pandas as pd
import keras
from itertools import cycle, islice
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
from sklearn.model_selection import ShuffleSplit, KFold
import file_utils
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
# import candle
global_cache = {}
SEED = 2018
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
DATA_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/'
logger = logging.getLogger(__name__)
def set_up_logger(verbose=False):
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if verbose else logging.INFO)
logger.setLevel(logging.DEBUG)
logger.addHandler(sh)
def set_seed(seed=SEED):
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
def get_file(url):
fname = os.path.basename(url)
return file_utils.get_file(fname, origin=url, cache_subdir='Pilot1')
def impute_and_scale(df, scaling='std', imputing='mean', dropna='all'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
if dropna:
df = df.dropna(axis=1, how=dropna)
else:
empty_cols = df.columns[df.notnull().sum() == 0]
df[empty_cols] = 0
if imputing is None or imputing.lower() == 'none':
mat = df.values
else:
imputer = Imputer(strategy=imputing, axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
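# Hedged usage sketch for impute_and_scale: a minimal, self-contained example on a
# toy dataframe; the column names are invented for illustration and are not part of
# the Pilot1 data files.
#   demo = pd.DataFrame({'f1': [1.0, np.nan, 3.0], 'f2': [0.5, 0.25, np.nan]})
#   scaled = impute_and_scale(demo, scaling='minmax', imputing='mean')
#   # NaNs are replaced by column means, then each column is rescaled to [0, 1]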
def discretize(df, col, bins=2, cutoffs=None):
y = df[col]
thresholds = cutoffs
if thresholds is None:
percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]
thresholds = [np.percentile(y, x) for x in percentiles]
classes = np.digitize(y, thresholds)
df[col] = classes
return df
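# Hedged worked example for discretize: with bins=2 and no explicit cutoffs, the
# single threshold is the 50th percentile of df[col], so values below the median
# map to class 0 and values at or above it map to class 1 (via np.digitize).
#   demo = pd.DataFrame({'GROWTH': [0.1, 0.4, 0.6, 0.9]})
#   discretize(demo, 'GROWTH', bins=2)   # demo['GROWTH'] becomes [0, 0, 1, 1]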
def save_combined_dose_response():
df1 = load_single_dose_response(combo_format=True, fraction=False)
df2 = load_combo_dose_response(fraction=False)
df = pd.concat([df1, df2])
df.to_csv('combined_drug_growth', index=False, sep='\t')
def load_combined_dose_response(rename=True):
df1 = load_single_dose_response(combo_format=True)
logger.info('Loaded {} single drug dose response measurements'.format(df1.shape[0]))
df2 = load_combo_dose_response()
logger.info('Loaded {} drug pair dose response measurements'.format(df2.shape[0]))
df = pd.concat([df1, df2])
logger.info('Combined dose response data contains sources: {}'.format(df['SOURCE'].unique()))
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG1': 'Drug1', 'DRUG2': 'Drug2',
'DOSE1': 'Dose1', 'DOSE2': 'Dose2',
'GROWTH': 'Growth', 'STUDY': 'Study'})
return df
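# Hedged example of how the combined frame might be consumed downstream; the call
# below only uses the columns produced by the rename step above.
#   df = load_combined_dose_response()
#   df[['Source', 'Sample', 'Drug1', 'Dose1', 'Drug2', 'Dose2', 'Growth', 'Study']].head()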
def load_single_dose_response(combo_format=False, fraction=True):
# path = get_file(DATA_URL + 'combined_single_drug_growth')
path = get_file(DATA_URL + 'rescaled_combined_single_drug_growth')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
# nrows=10,
dtype={'SOURCE': str, 'DRUG_ID': str,
'CELLNAME': str, 'CONCUNIT': str,
'LOG_CONCENTRATION': np.float32,
'EXPID': str, 'GROWTH': np.float32})
global_cache[path] = df
df['DOSE'] = -df['LOG_CONCENTRATION']
df = df.rename(columns={'CELLNAME': 'CELL', 'DRUG_ID': 'DRUG', 'EXPID': 'STUDY'})
df = df[['SOURCE', 'CELL', 'DRUG', 'DOSE', 'GROWTH', 'STUDY']]
if fraction:
df['GROWTH'] /= 100
if combo_format:
df = df.rename(columns={'DRUG': 'DRUG1', 'DOSE': 'DOSE1'})
df['DRUG2'] = np.nan
df['DOSE2'] = np.nan
df['DRUG2'] = df['DRUG2'].astype(object)
df['DOSE2'] = df['DOSE2'].astype(np.float32)
df = df[['SOURCE', 'CELL', 'DRUG1', 'DOSE1', 'DRUG2', 'DOSE2', 'GROWTH', 'STUDY']]
return df
def load_combo_dose_response(fraction=True):
path = get_file(DATA_URL + 'ComboDrugGrowth_Nov2017.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
usecols=['CELLNAME', 'NSC1', 'CONC1', 'NSC2', 'CONC2',
'PERCENTGROWTH', 'VALID', 'SCREENER', 'STUDY'],
# nrows=10000,
dtype={'CELLNAME': str, 'NSC1': str, 'NSC2': str,
'CONC1': np.float32, 'CONC2': np.float32,
'PERCENTGROWTH': np.float32, 'VALID': str,
'SCREENER': str, 'STUDY': str},
error_bad_lines=False, warn_bad_lines=True)
global_cache[path] = df
df = df[df['VALID'] == 'Y']
df['SOURCE'] = 'ALMANAC.' + df['SCREENER']
cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df_cellmap = pd.read_csv(cellmap_path, sep='\t')
df_cellmap.set_index('Name', inplace=True)
cellmap = df_cellmap[['NCI60.ID']].to_dict()['NCI60.ID']
df['CELL'] = df['CELLNAME'].map(lambda x: cellmap[x])
df['DOSE1'] = -np.log10(df['CONC1'])
df['DOSE2'] = -np.log10(df['CONC2'])
df['DRUG1'] = 'NSC.' + df['NSC1']
df['DRUG2'] = 'NSC.' + df['NSC2']
if fraction:
df['GROWTH'] = df['PERCENTGROWTH'] / 100
else:
df['GROWTH'] = df['PERCENTGROWTH']
df = df[['SOURCE', 'CELL', 'DRUG1', 'DOSE1', 'DRUG2', 'DOSE2', 'GROWTH', 'STUDY']]
return df
def load_aggregated_single_response(target='AUC', min_r2_fit=0.3, max_ec50_se=3, combo_format=False, rename=True):
path = get_file(DATA_URL + 'combined_single_response_agg')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, engine='c', sep='\t',
dtype={'SOURCE': str, 'CELL': str, 'DRUG': str, 'STUDY': str,
'AUC': np.float32, 'IC50': np.float32,
'EC50': np.float32, 'EC50se': np.float32,
'R2fit': np.float32, 'Einf': np.float32,
'HS': np.float32, 'AAC1': np.float32,
'AUC1': np.float32, 'DSS1': np.float32})
global_cache[path] = df
total = len(df)
df = df[(df['R2fit'] >= min_r2_fit) & (df['EC50se'] <= max_ec50_se)]
df = df[['SOURCE', 'CELL', 'DRUG', target, 'STUDY']]
df = df[~df[target].isnull()]
    logger.info('Loaded %d dose independent response samples (filtered by EC50se <= %f & R2fit >= %f from a total of %d).', len(df), max_ec50_se, min_r2_fit, total)
if combo_format:
df = df.rename(columns={'DRUG': 'DRUG1'})
df['DRUG2'] = np.nan
df['DRUG2'] = df['DRUG2'].astype(object)
df = df[['SOURCE', 'CELL', 'DRUG1', 'DRUG2', target, 'STUDY']]
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG1': 'Drug1', 'DRUG2': 'Drug2', 'STUDY': 'Study'})
else:
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG': 'Drug', 'STUDY': 'Study'})
return df
def load_drug_data(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_desc = load_drug_set_descriptors(drug_set='Combined_PubChem', ncols=ncols)
df_fp = load_drug_set_fingerprints(drug_set='Combined_PubChem', ncols=ncols)
df_desc = pd.merge(df_info[['ID', 'Drug']], df_desc, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_fp = pd.merge(df_info[['ID', 'Drug']], df_fp, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_desc2 = load_drug_set_descriptors(drug_set='NCI60', usecols=df_desc.columns.tolist() if ncols else None)
df_fp2 = load_drug_set_fingerprints(drug_set='NCI60', usecols=df_fp.columns.tolist() if ncols else None)
df_desc = pd.concat([df_desc, df_desc2]).reset_index(drop=True)
df1 = pd.DataFrame(df_desc.loc[:, 'Drug'])
df2 = df_desc.drop('Drug', 1)
df2 = impute_and_scale(df2, scaling=scaling, imputing=imputing, dropna=dropna)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
df_desc = pd.concat([df1, df2], axis=1)
df_fp = pd.concat([df_fp, df_fp2]).reset_index(drop=True)
df1 = pd.DataFrame(df_fp.loc[:, 'Drug'])
df2 = df_fp.drop('Drug', 1)
df2 = impute_and_scale(df2, scaling=None, imputing=imputing, dropna=dropna)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
df_fp = pd.concat([df1, df2], axis=1)
logger.info('Loaded combined dragon7 drug descriptors: %s', df_desc.shape)
logger.info('Loaded combined dragon7 drug fingerprints: %s', df_fp.shape)
return df_desc, df_fp
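# Hedged usage sketch: the loader above returns the descriptor and fingerprint
# frames together; the argument values below are the illustrative defaults, not a
# recommendation from the original pipeline.
#   df_desc, df_fp = load_drug_data(ncols=None, scaling='std', imputing='mean')
#   # both frames carry a 'Drug' id column plus 'dragon7.'-prefixed feature columns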
def load_drug_descriptors(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True, feature_subset=None):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_desc = load_drug_set_descriptors(drug_set='Combined_PubChem', ncols=ncols)
df_desc = pd.merge(df_info[['ID', 'Drug']], df_desc, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_desc2 = load_drug_set_descriptors(drug_set='NCI60', usecols=df_desc.columns.tolist() if ncols else None)
df_desc = pd.concat([df_desc, df_desc2]).reset_index(drop=True)
df1 = pd.DataFrame(df_desc.loc[:, 'Drug'])
df2 = df_desc.drop('Drug', 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
if feature_subset:
df2 = df2[[x for x in df2.columns if x in feature_subset]]
df2 = impute_and_scale(df2, scaling=scaling, imputing=imputing, dropna=dropna)
df_desc = pd.concat([df1, df2], axis=1)
logger.info('Loaded combined dragon7 drug descriptors: %s', df_desc.shape)
return df_desc
def load_drug_fingerprints(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True, feature_subset=None):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_fp = load_drug_set_fingerprints(drug_set='Combined_PubChem', ncols=ncols)
df_fp = pd.merge(df_info[['ID', 'Drug']], df_fp, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_fp2 = load_drug_set_fingerprints(drug_set='NCI60', usecols=df_fp.columns.tolist() if ncols else None)
df_fp = pd.concat([df_fp, df_fp2]).reset_index(drop=True)
df1 = | pd.DataFrame(df_fp.loc[:, 'Drug']) | pandas.DataFrame |
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
import context
from mhealth.utils.commons import print_title
from mhealth.utils.context_info import dump_context
from mhealth.utils.plotter_helper import save_figure, setup_plotting
def dataset_id(df):
# Format for identifiers: "[0-9]{3}[LR]"
# For example: "018L" or "042R"
pat = df["Patient"].map("{0:03d}".format)
side = df["Side"].str[0].str.upper()
return pat + side
def read_summary(path):
df = pd.read_csv(path, header=[0,1], index_col=[0,1,2])
df[("Time","Start")] = pd.to_datetime(df[("Time","Start")])
df[("Time","End")] = pd.to_datetime(df[("Time","End")])
return df
def read_exercises(path):
df = pd.read_csv(path)
df["StartDate"] = pd.to_datetime(df["StartDate"])
df["Duration"] = pd.to_timedelta(df["Duration"])
df["EndDate"] = pd.to_datetime(df["EndDate"])
return df
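# Hedged usage sketch (file paths are placeholders, not actual project paths):
#   summary = read_summary("summary.csv")        # two-level header, three-level index
#   exercises = read_exercises("exercises.csv")  # expects StartDate, Duration, EndDate columns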
def plot_qualities(df_before, df_after, out_dir):
qualities = ["HRQ", "SpO2Q", "QualityClassification"]
df_before = df_before.loc[:,pd.IndexSlice[qualities, "mean"]].copy()
df_before["Label"] = "before"
df_after = df_after.loc[:,pd.IndexSlice[qualities, "mean"]].copy()
df_after["Label"] = "after"
df = | pd.concat([df_before, df_after], axis=0) | pandas.concat |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import csv
from io import StringIO
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.parsers import TextParser
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = {"index_col": 0}
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
with TextParser(data_list, chunksize=2, **kwargs) as parser:
result = parser.read()
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, np.NaN, np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([np.NaN, np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([np.NaN, np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [ | TS('2015-01-04') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 14:17:17 2021
@author: supokhrel
"""
from PyQt5 import QtWidgets, uic, QtCore
import sys
import os
import threading
File_Path = ''
init_dir = os.getcwd()
def BrowseFile():
global status
global File_Path
global init_dir
# print("Browsing...")
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(None, 'Single File', init_dir , '*.csv')
# print(fileName)
if (fileName == ''):
call.lineEdit.setText("!!!! File Not selected")
File_Path = ''
else:
call.lineEdit.setText(fileName)
File_Path = fileName
init_dir = os.path.dirname(os.path.abspath(fileName))
call.lineEdit_7.setText("Filling Entries")
def string_conveter_func(get_str):
new_str = get_str.replace("PM10", "PM$\mathregular{_{10}}$")
new_str = new_str.replace("PM2.5", "PM$\mathregular{_{2.5}}$")
new_str = new_str.replace("PM1", "PM$\mathregular{_{1}}$")
new_str = new_str.replace("O3", "O$\mathregular{_{3}}$")
new_str = new_str.replace("CO2", "CO$\mathregular{_{2}}$")
new_str = new_str.replace("SO2", "SO$\mathregular{_{2}}$")
new_str = new_str.replace("NOx", "NO$\mathregular{_{x}}$")
new_str = new_str.replace("NO2", "NO$\mathregular{_{2}}$")
new_str = new_str.replace("ug/m3", "µg/m$\mathregular{^{3}}$")
new_str = new_str.replace("DEG", "$\degree$")
return new_str
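# Hedged example of the conversion above (input/output shown for illustration only):
#   string_conveter_func("PM2.5 (ug/m3)")
#   # -> "PM$\mathregular{_{2.5}}$ (µg/m$\mathregular{^{3}}$)"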
def is_int_or_float(s):
''' return 1 for int, 2 for float, -1 for not a number'''
try:
float(s)
return 1 if s.count('.')==0 else 2
except ValueError:
return -1
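# Hedged examples for the helper above:
#   is_int_or_float("42")   -> 1   (integer-like)
#   is_int_or_float("3.14") -> 2   (float-like)
#   is_int_or_float("abc")  -> -1  (not a number)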
from matplotlib import pyplot as plt
#import numpy as np
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
def Data_process():
# print("Started")
# call.update()
call.lineEdit_7.setText('Processing....... Please do not press any button')
import pandas as pd
import glob
# import numpy as np
from matplotlib import rc
from datetime import datetime
global fig, ax
# ax = None
# fig, ax = plt.subplots()
File = glob.glob(File_Path)
if not File:
# print("Empty Filename")
# window.destroy()
call.lineEdit_7.setText('Error: No input file selected.')
return 0
Plot_title = call.lineEdit_2.text()
Plot_title = string_conveter_func(Plot_title)
X_lab = call.lineEdit_3.text()
X_lab = string_conveter_func(X_lab)
Y_lab = call.lineEdit_4.text()
Y_lab = string_conveter_func(Y_lab)
y_min = call.lineEdit_5.text()
y_min_contain = False
if(y_min != ''):
y_min_contain = True
check_ymin = is_int_or_float(y_min)
if (check_ymin == 1):
y_min = int(y_min)
elif (check_ymin == 2):
y_min = float(y_min)
else:
call.lineEdit_7.setText('Error: Y-min is not a numerical value')
return 0
y_max = call.lineEdit_6.text()
y_max_contain = False
if(y_max !=''):
y_max_contain = True
check_ymax = is_int_or_float(y_max)
if (check_ymax == 1):
y_max = int(y_max)
elif (check_ymax == 2):
y_max = float(y_max)
else:
call.lineEdit_7.setText('Error: Y-max is not a numerical value')
return 0
plot_type = call.comboBox.currentText()
# print("Plot-type",plot_type )
res_dpi = call.comboBox_2.currentText()
X_size = int(call.comboBox_3.currentText())
Y_size = int(call.comboBox_4.currentText())
Y_grid = call.radioButton_3.isChecked()
X_grid = call.radioButton_5.isChecked()
try:
df = pd.concat([pd.read_csv(fp, low_memory=False) for fp in File], ignore_index=True)
except:
call.lineEdit_7.setText('Error in reading the input file')
return 0
for i in range(1, len(df.columns)):
df[df.columns[i]] = | pd.to_numeric(df[df.columns[i]], errors='coerce') | pandas.to_numeric |
import pandas as pd
import numpy as np
import pytest
from kgextension.endpoints import DBpedia
from kgextension.schema_matching import (
relational_matching,
label_schema_matching,
value_overlap_matching,
string_similarity_matching
)
class TestRelationalMatching:
def test1_default(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test2_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test3_uri_querier(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, uri_data_model=True)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test4_uri_querier_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, uri_data_model=True)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test5_match_score(self):
score = 0.76
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
expected_matches['value'] = np.where(
expected_matches['value']==1, score, expected_matches['value'])
output_matches = relational_matching(df, match_score=score)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test6_one_endpoint(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, endpoints=DBpedia)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test7_no_http_input(self):
df = pd.DataFrame({'a': [1, 2, 3],
'b': [4, 5, 6]})
expected_matches = pd.DataFrame(columns=["uri_1", "uri_2", "value"])
output_matches = relational_matching(df)
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
class TestStringSimilarityMatching():
def test1_default(self):
path_input = "test/data/schema_matching/string_matching_input_t1t2.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t1.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, prefix_threshold=1)
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
def test2_highthreshold(self):
path_input = "test/data/schema_matching/string_matching_input_t1t2.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t2.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, prefix_threshold=10)
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
def test3_diffpredicate_diffmetric(self):
path_input = "test/data/schema_matching/string_matching_input_t3.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t3.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, predicate="dbo:abstract", to_lowercase=False, remove_prefixes=False, remove_punctuation=False, similarity_metric="token_set_levenshtein")
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
class TestLabelSchemaMatching:
def test1_default(self):
path_input = "test/data/schema_matching/default_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test2_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test3_uri_querier(self):
path_input = "test/data/schema_matching/default_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df, uri_data_model=True)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test4_uri_querier_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_boolean_expected.csv"
expected_matches = | pd.read_csv(path_expected) | pandas.read_csv |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
from contextlib import contextmanager
from datetime import timedelta
from functools import partial
import pickle
import sys
from types import GetSetDescriptorType
from unittest import TestCase
import uuid
import warnings
from nose_parameterized import parameterized
from numpy import full, int32, int64
import pandas as pd
from pandas.util.testing import assert_frame_equal
from six import PY2, viewkeys
import sqlalchemy as sa
from zipline.assets import (
Asset,
Equity,
Future,
AssetDBWriter,
AssetFinder,
)
from zipline.assets.synthetic import (
make_commodity_future_info,
make_rotating_equity_info,
make_simple_equity_info,
)
from six import itervalues, integer_types
from toolz import valmap
from zipline.assets.asset_writer import (
check_version_info,
write_version_info,
_futures_defaults,
SQLITE_MAX_VARIABLE_NUMBER,
)
from zipline.assets.asset_db_schema import ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import (
downgrade
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
AssetDBVersionError,
SidsNotFound,
SymbolNotFound,
AssetDBImpossibleDowngrade,
ValueNotFoundForField,
)
from zipline.testing import (
all_subindices,
empty_assets_db,
parameter_space,
tmp_assets_db,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.fixtures import (
WithAssetFinder,
ZiplineTestCase,
WithTradingCalendars,
)
from zipline.utils.range import range
@contextmanager
def build_lookup_generic_cases(asset_finder_type):
"""
Generate test cases for the type of asset finder specified by
asset_finder_type for test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'duplicated',
'start_date': dupe_0_start.value,
'end_date': dupe_0_end.value,
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'duplicated',
'start_date': dupe_1_start.value,
'end_date': dupe_1_end.value,
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'unique',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'TEST',
},
],
index='sid'
)
fof14_sid = 10000
futures = pd.DataFrame.from_records(
[
{
'sid': fof14_sid,
'symbol': 'FOF14',
'root_symbol': 'FO',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'FUT',
},
],
index='sid'
)
root_symbols = pd.DataFrame({
'root_symbol': ['FO'],
'root_symbol_id': [1],
'exchange': ['CME'],
})
with tmp_assets_db(
equities=equities, futures=futures, root_symbols=root_symbols) \
as assets_db:
finder = asset_finder_type(assets_db)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
fof14 = finder.retrieve_asset(fof14_sid)
cf = finder.create_continuous_future(
root_symbol=fof14.root_symbol, offset=0, roll_style='volume',
)
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
yield (
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'DUPLICATED', dupe_0_start, dupe_0),
(finder, 'DUPLICATED', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'UNIQUE', unique_start, unique),
(finder, 'UNIQUE', None, unique),
# Futures
(finder, 'FOF14', None, fof14),
# Future symbols should be unique, but including an as_of date
# makes sure that code path is exercised.
(finder, 'FOF14', unique_start, fof14),
# Futures int
(finder, fof14_sid, None, fof14),
# Future symbols should be unique, but including an as_of date
# makes sure that code path is exercised.
(finder, fof14_sid, unique_start, fof14),
# ContinuousFuture
(finder, cf, None, cf),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('DUPLICATED', 'UNIQUE'), dupe_0_start, [dupe_0, unique]),
(finder, ('DUPLICATED', 'UNIQUE'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('DUPLICATED', 2, 'UNIQUE', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
# Futures and Equities
(finder, ['FOF14', 0], None, [fof14, assets[0]]),
# ContinuousFuture and Equity
(finder, [cf, 0], None, [cf, assets[0]]),
)
class AssetTestCase(TestCase):
# Dynamically list the Asset properties we want to test.
asset_attrs = [name for name, value in vars(Asset).items()
if isinstance(value, GetSetDescriptorType)]
# Very wow
asset = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
auto_close_date=pd.Timestamp('2014-06-26 11:21AM', tz='UTC'),
exchange='THE MOON',
)
asset3 = Asset(3, exchange="test")
asset4 = Asset(4, exchange="test")
asset5 = Asset(5, exchange="still testing")
def test_asset_object(self):
the_asset = Asset(5061, exchange="bar")
self.assertEquals({5061: 'foo'}[the_asset], 'foo')
self.assertEquals(the_asset, 5061)
self.assertEquals(5061, the_asset)
self.assertEquals(the_asset, the_asset)
self.assertEquals(int(the_asset), 5061)
self.assertEquals(str(the_asset), 'Asset(5061)')
def test_to_and_from_dict(self):
asset_from_dict = Asset.from_dict(self.asset.to_dict())
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_from_dict, attr),
)
def test_asset_is_pickleable(self):
asset_unpickled = pickle.loads(pickle.dumps(self.asset))
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_unpickled, attr),
)
def test_asset_comparisons(self):
s_23 = Asset(23, exchange="test")
s_24 = Asset(24, exchange="test")
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertEqual(int32(23), s_23)
self.assertEqual(int64(23), s_23)
self.assertEqual(s_23, int32(23))
self.assertEqual(s_23, int64(23))
# Check all int types (includes long on py2):
for int_type in integer_types:
self.assertEqual(int_type(23), s_23)
self.assertEqual(s_23, int_type(23))
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
# Compare to a value that doesn't fit into a platform int:
self.assertNotEqual(s_23, sys.maxsize + 1)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(self.asset3 < self.asset4)
self.assertFalse(self.asset4 < self.asset4)
self.assertFalse(self.asset5 < self.asset4)
def test_le(self):
self.assertTrue(self.asset3 <= self.asset4)
self.assertTrue(self.asset4 <= self.asset4)
self.assertFalse(self.asset5 <= self.asset4)
def test_eq(self):
self.assertFalse(self.asset3 == self.asset4)
self.assertTrue(self.asset4 == self.asset4)
self.assertFalse(self.asset5 == self.asset4)
def test_ge(self):
self.assertFalse(self.asset3 >= self.asset4)
self.assertTrue(self.asset4 >= self.asset4)
self.assertTrue(self.asset5 >= self.asset4)
def test_gt(self):
self.assertFalse(self.asset3 > self.asset4)
self.assertFalse(self.asset4 > self.asset4)
self.assertTrue(self.asset5 > self.asset4)
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(self.asset3 < 'a')
self.assertIsNotNone('a' < self.asset3)
else:
with self.assertRaises(TypeError):
self.asset3 < 'a'
with self.assertRaises(TypeError):
'a' < self.asset3
class TestFuture(WithAssetFinder, ZiplineTestCase):
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
2468: {
'symbol': 'OMH15',
'root_symbol': 'OM',
'notice_date': pd.Timestamp('2014-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2014-02-20', tz='UTC'),
'auto_close_date': pd.Timestamp('2014-01-18', tz='UTC'),
'tick_size': .01,
'multiplier': 500.0,
'exchange': "TEST",
},
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'multiplier': 1.0,
'exchange': 'TEST',
},
},
orient='index',
)
@classmethod
def init_class_fixtures(cls):
super(TestFuture, cls).init_class_fixtures()
cls.future = cls.asset_finder.lookup_future_symbol('OMH15')
cls.future2 = cls.asset_finder.lookup_future_symbol('CLG06')
def test_str(self):
strd = str(self.future)
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = repr(self.future)
self.assertIn("Future", reprd)
self.assertIn("2468", reprd)
self.assertIn("OMH15", reprd)
self.assertIn("root_symbol=%s'OM'" % ('u' if PY2 else ''), reprd)
self.assertIn(
"notice_date=Timestamp('2014-01-20 00:00:00+0000', tz='UTC')",
reprd,
)
self.assertIn(
"expiration_date=Timestamp('2014-02-20 00:00:00+0000'",
reprd,
)
self.assertIn(
"auto_close_date=Timestamp('2014-01-18 00:00:00+0000'",
reprd,
)
self.assertIn("tick_size=0.01", reprd)
self.assertIn("multiplier=500", reprd)
def test_reduce(self):
assert_equal(
pickle.loads(pickle.dumps(self.future)).to_dict(),
self.future.to_dict(),
)
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
for field in _futures_defaults.keys():
self.assertTrue(field in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
def test_lookup_future_symbol(self):
"""
Test the lookup_future_symbol method.
"""
om = TestFuture.asset_finder.lookup_future_symbol('OMH15')
self.assertEqual(om.sid, 2468)
self.assertEqual(om.symbol, 'OMH15')
self.assertEqual(om.root_symbol, 'OM')
self.assertEqual(om.notice_date, pd.Timestamp('2014-01-20', tz='UTC'))
self.assertEqual(om.expiration_date,
pd.Timestamp('2014-02-20', tz='UTC'))
self.assertEqual(om.auto_close_date,
pd.Timestamp('2014-01-18', tz='UTC'))
cl = TestFuture.asset_finder.lookup_future_symbol('CLG06')
self.assertEqual(cl.sid, 0)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('#&?!')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('FOOBAR')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('XXX99')
class AssetFinderTestCase(WithTradingCalendars, ZiplineTestCase):
asset_finder_type = AssetFinder
def write_assets(self, **kwargs):
self._asset_writer.write(**kwargs)
def init_instance_fixtures(self):
super(AssetFinderTestCase, self).init_instance_fixtures()
conn = self.enter_instance_context(empty_assets_db())
self._asset_writer = AssetDBWriter(conn)
self.asset_finder = self.asset_finder_type(conn)
def test_blocked_lookup_symbol_query(self):
# we will try to query for more variables than sqlite supports
# to make sure we are properly chunking on the client side
as_of = pd.Timestamp('2013-01-01', tz='UTC')
# we need more sids than we can query from sqlite
nsids = SQLITE_MAX_VARIABLE_NUMBER + 10
sids = range(nsids)
frame = pd.DataFrame.from_records(
[
{
'sid': sid,
'symbol': 'TEST.%d' % sid,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for sid in sids
]
)
self.write_assets(equities=frame)
assets = self.asset_finder.retrieve_equities(sids)
assert_equal(viewkeys(assets), set(sids))
def test_lookup_symbol_delimited(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'TEST.%d' % i,
'company_name': "company%d" % i,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for i in range(3)
]
)
self.write_assets(equities=frame)
finder = self.asset_finder
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
# we do it twice to catch caching bugs
for i in range(2):
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST', as_of)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST1', as_of)
# '@' is not a supported delimiter
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST@1', as_of)
# Adding an unnecessary fuzzy shouldn't matter.
for fuzzy_char in ['-', '/', '_', '.']:
self.assertEqual(
asset_1,
finder.lookup_symbol('TEST%s1' % fuzzy_char, as_of)
)
def test_lookup_symbol_fuzzy(self):
metadata = pd.DataFrame.from_records([
{'symbol': 'PRTY_HRD', 'exchange': "TEST"},
{'symbol': 'BRKA', 'exchange': "TEST"},
{'symbol': 'BRK_A', 'exchange': "TEST"},
])
self.write_assets(equities=metadata)
finder = self.asset_finder
dt = pd.Timestamp('2013-01-01', tz='UTC')
# Try combos of looking up PRTYHRD with and without a time or fuzzy
# Both non-fuzzys get no result
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', None)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', dt)
# Both fuzzys work
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', dt, fuzzy=True))
# Try combos of looking up PRTY_HRD, all returning sid 0
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt, fuzzy=True))
# Try combos of looking up BRKA, all returning sid 1
self.assertEqual(1, finder.lookup_symbol('BRKA', None))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt))
self.assertEqual(1, finder.lookup_symbol('BRKA', None, fuzzy=True))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt, fuzzy=True))
# Try combos of looking up BRK_A, all returning sid 2
self.assertEqual(2, finder.lookup_symbol('BRK_A', None))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt))
self.assertEqual(2, finder.lookup_symbol('BRK_A', None, fuzzy=True))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt, fuzzy=True))
def test_lookup_symbol_change_ticker(self):
T = partial(pd.Timestamp, tz='utc')
metadata = pd.DataFrame.from_records(
[
# sid 0
{
'symbol': 'A',
'asset_name': 'Asset A',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'B',
'asset_name': 'Asset B',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
# sid 1
{
'symbol': 'C',
'asset_name': 'Asset C',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'A', # claiming the unused symbol 'A'
'asset_name': 'Asset A',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
],
index=[0, 0, 1, 1],
)
self.write_assets(equities=metadata)
finder = self.asset_finder
# note: these assertions walk forward in time, starting at assertions
# about ownership before the start_date and ending with assertions
# after the end_date; new assertions should be inserted in the correct
# locations
# no one held 'A' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('A', T('2013-12-31'))
# no one held 'C' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('C', T('2013-12-31'))
for asof in pd.date_range('2014-01-01', '2014-01-05', tz='utc'):
# from 01 through 05 sid 0 held 'A'
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(0),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(A_result.symbol, 'B')
assert_equal(A_result.asset_name, 'Asset B')
# from 01 through 05 sid 1 held 'C'
C_result = finder.lookup_symbol('C', asof)
assert_equal(
C_result,
finder.retrieve_asset(1),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(C_result.symbol, 'A')
assert_equal(C_result.asset_name, 'Asset A')
# no one held 'B' before 06
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('B', T('2014-01-05'))
# no one held 'C' after 06, however, no one has claimed it yet
# so it still maps to sid 1
assert_equal(
finder.lookup_symbol('C', T('2014-01-07')),
finder.retrieve_asset(1),
)
for asof in pd.date_range('2014-01-06', '2014-01-11', tz='utc'):
# from 06 through 10 sid 0 held 'B'
# we test through the 11th because sid 1 is the last to hold 'B'
# so it should ffill
B_result = finder.lookup_symbol('B', asof)
assert_equal(
B_result,
finder.retrieve_asset(0),
msg=str(asof),
)
assert_equal(B_result.symbol, 'B')
assert_equal(B_result.asset_name, 'Asset B')
# from 06 through 10 sid 1 held 'A'
# we test through the 11th because sid 1 is the last to hold 'A'
# so it should ffill
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(1),
msg=str(asof),
)
assert_equal(A_result.symbol, 'A')
assert_equal(A_result.asset_name, 'Asset A')
def test_lookup_symbol(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'existing',
'start_date': date.value,
'end_date': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
self.write_assets(equities=df)
finder = self.asset_finder
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('NON_EXISTING', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol('EXISTING', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol('EXISTING', date)
self.assertEqual(result.symbol, 'EXISTING')
self.assertEqual(result.sid, i)
def test_fail_to_write_overlapping_data(self):
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later end date.
{
'sid': 2,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2013-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later start_date
{
'sid': 3,
'symbol': 'multiple',
'start_date': pd.Timestamp('2011-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
]
)
with self.assertRaises(ValueError) as e:
self.write_assets(equities=df)
self.assertEqual(
str(e.exception),
"Ambiguous ownership for 1 symbol, multiple assets held the"
" following symbols:\n"
"MULTIPLE:\n"
" intersections: (('2010-01-01 00:00:00', '2012-01-01 00:00:00'),"
" ('2011-01-01 00:00:00', '2012-01-01 00:00:00'))\n"
" start_date end_date\n"
" sid \n"
" 1 2010-01-01 2012-01-01\n"
" 2 2010-01-01 2013-01-01\n"
" 3 2011-01-01 2012-01-01"
)
def test_lookup_generic(self):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
with build_lookup_generic_cases(self.asset_finder_type) as cases:
for finder, symbols, reference_date, expected in cases:
results, missing = finder.lookup_generic(symbols,
reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_none_raises(self):
"""
If lookup_symbol is vectorized across multiple symbols, and one of them
        is None, we want to raise a TypeError.
"""
with self.assertRaises(TypeError):
self.asset_finder.lookup_symbol(None, pd.Timestamp('2013-01-01'))
def test_lookup_mult_are_one(self):
"""
Ensure that multiple symbols that return the same sid are collapsed to
a single returned asset.
"""
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': symbol,
'start_date': date.value,
'end_date': (date + timedelta(days=30)).value,
'exchange': 'NYSE',
}
for symbol in ('FOOB', 'FOO_B')
]
)
self.write_assets(equities=df)
finder = self.asset_finder
        # If we are able to resolve this with any result, it means that we did
        # not raise a MultipleSymbolsFound error.
result = finder.lookup_symbol('FOO/B', date + timedelta(1), fuzzy=True)
self.assertEqual(result.sid, 1)
def test_endless_multiple_resolves(self):
"""
Situation:
1. Asset 1 w/ symbol FOOB changes to FOO_B, and then is delisted.
2. Asset 2 is listed with symbol FOO_B.
If someone asks for FOO_B with fuzzy matching after 2 has been listed,
they should be able to correctly get 2.
"""
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'FOOB',
'start_date': date.value,
'end_date': date.max.value,
'exchange': 'NYSE',
},
{
'sid': 1,
'symbol': 'FOO_B',
'start_date': (date + timedelta(days=31)).value,
'end_date': (date + timedelta(days=60)).value,
'exchange': 'NYSE',
},
{
'sid': 2,
'symbol': 'FOO_B',
'start_date': (date + timedelta(days=61)).value,
'end_date': date.max.value,
'exchange': 'NYSE',
},
]
)
self.write_assets(equities=df)
finder = self.asset_finder
        # If we are able to resolve this with any result, it means that we did
        # not raise a MultipleSymbolsFound error.
result = finder.lookup_symbol(
'FOO/B',
date + timedelta(days=90),
fuzzy=True
)
self.assertEqual(result.sid, 2)
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'also_real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'symbol': 'real_but_old',
'start_date': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': 'TEST',
},
# Sid whose start_date is **after** our query date. We should
# **not** find it.
{
'sid': 3,
'symbol': 'real_but_in_the_future',
'start_date': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
self.write_assets(equities=data)
finder = self.asset_finder
results, missing = finder.lookup_generic(
['REAL', 1, 'FAKE', 'REAL_BUT_OLD', 'REAL_BUT_IN_THE_FUTURE'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'REAL')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'ALSO_REAL')
self.assertEqual(results[1].sid, 1)
self.assertEqual(results[2].symbol, 'REAL_BUT_OLD')
self.assertEqual(results[2].sid, 2)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'FAKE')
self.assertEqual(missing[1], 'REAL_BUT_IN_THE_FUTURE')
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end,
exchange="TEST")
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = self.asset_finder
asset1 = Equity(1, symbol="AAPL", exchange="TEST")
asset2 = Equity(2, symbol="GOOG", exchange="TEST")
asset200 = Future(200, symbol="CLK15", exchange="TEST")
asset201 = Future(201, symbol="CLM15", exchange="TEST")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
def test_compute_lifetimes(self):
num_assets = 4
trading_day = self.trading_calendar.day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_equity_info(
num_assets=num_assets,
first_start=first_start,
frequency=trading_day,
periods_between_starts=3,
asset_lifetime=5
)
self.write_assets(equities=frame)
finder = self.asset_finder
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_with_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
expected_no_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
# This way of doing the checks is redundant, but very
# clear.
if start <= date <= end:
expected_with_start_raw[i, j] = True
if start < date:
expected_no_start_raw[i, j] = True
expected_with_start = pd.DataFrame(
data=expected_with_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=True)
assert_frame_equal(result, expected_with_start)
expected_no_start = pd.DataFrame(
data=expected_no_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=False)
assert_frame_equal(result, expected_no_start)
def test_sids(self):
# Ensure that the sids property of the AssetFinder is functioning
self.write_assets(equities=make_simple_equity_info(
[0, 1, 2],
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
))
self.assertEqual({0, 1, 2}, set(self.asset_finder.sids))
def test_lookup_by_supplementary_field(self):
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'A',
'start_date': | pd.Timestamp('2013-1-1', tz='UTC') | pandas.Timestamp |
"""
"""
__version__='192.168.3.11.dev1'
import sys
import os
import logging
import pandas as pd
import re
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
logger = logging.getLogger('PT3S')
try:
from PT3S import Rm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Rm - trying import Rm instead ... maybe pip install -e . is active ...'))
import Rm
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
def addResVecToDfAlarmEreignisse(
dfAlarmEreignisse
,TCsLDSRes1= | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from .DensityFunctions import BaseDensityCalc
def raw_delta_calc(times):
'''
Given an array of times, this function calculates the deltas between them.
Arguments
---------
- times: array:
This is an array of times that will be used to calculate the deltas.
Returns
---------
- out: array:
This is an array of deltas.
'''
out = (times[1:] - times[:-1])*1e-9
out = out.astype(float)
return out
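# Hedged usage sketch (not part of the original module): raw_delta_calc expects an
# array of numpy datetime64[ns] values (e.g. DatetimeIndex.values); the nanosecond
# differences are scaled by 1e-9, so the result is in seconds.
def _example_raw_delta_calc():
    times = pd.to_datetime(
        ['2021-05-05 08:00:00', '2021-05-05 08:00:30', '2021-05-05 08:02:00'],
        utc=True,
    ).values
    # expected result: approximately [30., 90.]
    return raw_delta_calc(times)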
def single_location_delta(input_df, single_location,
columns={'time': 'time', 'location': 'location'}, recall_value=5,
return_as_list = False):
'''
This function takes the ```input_df``` and calculates the raw time delta between the single_location location time
and the time of the ```recall_value``` number of locations immediately before the single_location.
This does not separate on subject. Please pass data from a single subject into this function.
Arguments
---------
- input_df: pandas dataframe:
This is a dataframe that contains columns relating to the subject, time and location of sensor trigger.
- single_location: string:
This is the location value that you wish to calculate the delta to.
- columns: dictionary:
This is the dictionary with the column names in ```input_df``` for each of the values of data that we need
in our calculations.
This dictionary should be of the form:
```
{'time': column containing the times of sensor triggers,
'location': column containing the locations of the sensor triggers}
```
- recall_value: integer:
This is the number of previous locations to the single_location trigger
- return_as_list: bool:
This option allows the user to return a list of the dates and data if ```True```. This is
used internally by other functions.
Returns
---------
- out: dictionary:
This has the Timestamps of the dates as keys (for example: Timestamp('2021-05-05 00:00:00')) and the
arrays of deltas as values. The arrays of deltas are of shape ```(Nt, recall_value)``` where Nt is the
number of visits to ```single_location``` on a given day. If there are no ```single_location``` visits
found in the data, then an empty dictionary will be returned.
'''
time_column = columns['time']
location_column = columns['location']
# format the incoming data to ensure assumptions about structure are met
input_df[time_column] = pd.to_datetime(input_df[time_column], utc=True)
input_df = input_df.sort_values(time_column)
# find the indices of the data that match with the location we want to find the delta to
single_location_indices = np.where(input_df[location_column] == single_location)[0].reshape(-1, 1)
# making sure that the recall value is not more than the number of sensor triggers before the
# first single_location sensor trigger
if len(single_location_indices) == 0:
if return_as_list: return [], []
else: return {}
single_location_indices = single_location_indices[np.argmax(recall_value < single_location_indices):]
# indices of the sensor triggers that we need in our calculations
recall_indices = np.hstack([single_location_indices - i for i in range(recall_value + 1)])
# the times of the sensor triggers
recall_times = input_df[time_column].values[recall_indices]
# the delta between the times for each of the previous sensors to recall_value
recall_delta = (recall_times[:, 0, None] - recall_times[:, 1:]) * 1e-9
# the times of the single_location triggers
single_location_times = input_df[time_column].iloc[single_location_indices.reshape(-1, )]
# dates of the single_location triggers
single_location_dates = single_location_times.dt.date
# out dictionary
out = {}
if return_as_list:
date_list = []
data_list = []
for nd, date in enumerate(single_location_dates.unique()):
date_list.append(date)
data_to_add = recall_delta[single_location_dates.values == date].astype(float)
data_list.append(data_to_add)
return pd.to_datetime(date_list), data_list
else:
# creating the output dictionary
for date in single_location_dates.unique():
# saving the delta values for this date to the dictionary
out[pd.to_datetime(date)] = recall_delta[single_location_dates.values == date].astype(float)
return out
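# Hedged usage sketch (not part of the original module): a toy single-subject
# trace using the default 'time'/'location' column names. With recall_value=2 the
# returned dict maps each date to an (Nt, 2) array of deltas, in seconds, from the
# two sensor triggers that preceded each 'kitchen' visit; the data below is made up.
def _example_single_location_delta():
    df = pd.DataFrame({
        'time': pd.to_datetime([
            '2021-05-05 08:00:00', '2021-05-05 08:01:00',
            '2021-05-05 08:03:00', '2021-05-05 08:10:00',
        ], utc=True),
        'location': ['hallway', 'lounge', 'kitchen', 'kitchen'],
    })
    return single_location_delta(df, 'kitchen', recall_value=2)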
class TimeDeltaDensity(BaseDensityCalc):
'''
This function allows the user to calculate reverse percentiles on some data, given another
dataset.
'''
def __init__(self, save_baseline_array=True, sample=False, sample_size=10000,
seed=None, verbose=True):
BaseDensityCalc.__init__(self, save_baseline_array=save_baseline_array,
sample=sample, sample_size=sample_size, seed=seed, verbose=verbose)
return
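# Hedged usage sketch (not part of the original module): TimeDeltaDensity inherits
# its fit/transform behaviour from BaseDensityCalc; the pattern below simply mirrors
# how it is used in rp_single_location_delta further down (fit on baseline deltas,
# then transform new deltas into reverse percentiles). The synthetic exponential
# baseline is an assumption made purely for illustration.
def _example_time_delta_density():
    baseline = np.random.default_rng(0).exponential(scale=60.0, size=(1000, 1))
    new_deltas = np.array([[1.0], [60.0], [600.0]])
    td = TimeDeltaDensity(sample=True, sample_size=1000, seed=0, verbose=False)
    td.fit(baseline)
    return td.transform(new_deltas)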
def rp_single_location_delta(input_df, single_location, baseline_length_days = 7, baseline_offset_days = 0,
columns={'time': 'time', 'location': 'location'}, recall_value=5):
'''
This function takes the ```input_df``` and calculates the reverse percentage time delta between the ```single_location``` location time
and the time of the ```recall_value``` number of locations immediately before the ```single_location```. The baseline
for the reverse percentage calculation is defined by ```baseline_length_days``` and ```baseline_offset_days```.
For example:
With ```baseline_length_days = 7``` and ```baseline_offset_days = 1```, the rp deltas on the day
```pd.Timestamp('2021-06-29')``` are calculated using the deltas from
```pd.Timestamp('2021-06-21 00:00:00')``` to ```pd.Timestamp('2021-06-28 00:00:00')```.
This does not separate on subject. Please pass data from a single subject into this function.
NOTE: The reverse percentage is calculated based on all of the deltas coming into a location!
This means that the delta is agnostic to the "from" location.
Arguments
---------
- input_df: pandas dataframe:
This is a dataframe that contains columns relating to the time and location of sensor trigger.
- single_location: string:
This is the location value that you wish to calculate the delta to.
- baseline_length_days: integer:
This is the length of the baseline in days that will be used. This value is used when finding
the ```baseline_length_days``` complete days of ```single_location``` data to use as a baseline.
- baseline_offset_days: integer:
This is the offset to the baseline period. ```0``` corresponds to a time period ending the morning of the
current date being calculated on.
- columns: dictionary:
This is the dictionary with the column names in ```input_df``` for each of the values of data that we need
in our calculations.
This dictionary should be of the form:
```
{'time': column containing the times of sensor triggers,
'location': column containing the locations of the sensor triggers}
```
- recall_value: integer:
This is the number of previous locations to the single_location trigger
Returns
---------
- out: dictionary:
This has the Timestamps of the dates as keys (for example: Timestamp('2021-05-05 00:00:00')) and the
arrays of deltas as values. The arrays of deltas are of shape ```(Nt, recall_value)``` where Nt is the
number of visits to ```single_location``` on a given day.
'''
# column names
time_column = columns['time']
location_column = columns['location']
out = {}
# format the incoming data to ensure assumptions about structure are met
input_df[time_column] = pd.to_datetime(input_df[time_column], utc=True)
input_df = input_df.sort_values(time_column)
# getting the single location raw delta
date_list, data_list = single_location_delta(input_df, single_location, columns, recall_value, return_as_list=True)
# for each date
for nd, date in enumerate(date_list):
date = pd.to_datetime(date)
'''
if len(baseline_offset)>0:
baseline_start_tp = pd.to_datetime(date - pd.Timedelta(**baseline_length) - pd.Timedelta(**baseline_offset))
baseline_end_tp = pd.to_datetime(date - pd.Timedelta(**baseline_offset))
else:
baseline_start_tp = pd.to_datetime(date - pd.Timedelta(**baseline_length))
baseline_end_tp = pd.to_datetime(date)
'''
index_baseline_end = np.where(date_list <= date)[0][-1]
index_baseline_end = index_baseline_end - baseline_offset_days
index_baseline_start = index_baseline_end - baseline_length_days
if index_baseline_start < 0:
out[date] = -1*np.ones_like(data_list[nd])
continue
baseline_delta = np.vstack([data_list[index] for index in range(index_baseline_start, index_baseline_end)])
td = TimeDeltaDensity(save_baseline_array=True, sample=True, sample_size=10000,
seed=None, verbose=False)
td.fit(baseline_delta)
out[date] = td.transform(data_list[nd])
return out
def rp_location_delta(data, columns = {'time': 'start_date', 'location': 'location_name'}, baseline_length_days = 7,
baseline_offset_days = 0, all_loc_as_baseline = False):
'''
    This function allows you to calculate the reverse percentage of the deltas for each of the locations based on a baseline.
This function allows you to specify whether to calculate the rp values based on the deltas to the same location or
whether to calculate them using all locations.
Arguments
---------
- data: pandas dataframe:
This is the dataframe containing the time and locations that will be used to calculate the reverse
percentage deltas
- columns: dictionary:
This is the dictionary with the column names in ```input_df``` for each of the values of data that we need
in our calculations.
This dictionary should be of the form:
```
{'time': column containing the times of sensor triggers,
'location': column containing the locations of the sensor triggers}
```
- baseline_length_days: integer:
This is the length of the baseline in days that will be used. This value is used when finding
the ```baseline_length_days``` complete days of ```single_location``` data to use as a baseline.
- baseline_offset_days: integer:
This is the offset to the baseline period. ```0``` corresponds to a time period ending the morning of the
current date being calculated on.
- all_loc_as_baseline: bool:
This argument dictates whether all the locations are used as part of the calculationg for the reverse
percentage or if only the values from the ```to``` locations are used.
Returns
---------
- out: pandas dataframe:
This is the outputted data frame, complete with rp values.
'''
import time
time_col = columns['time']
location_col = columns['location']
data[time_col] = pd.to_datetime(data[time_col])
data = data.sort_values(time_col)
if all_loc_as_baseline:
times = data[time_col].values
raw_delta = raw_delta_calc(times)
locations = data[location_col].values
df_dict = {'from': locations[:-1], 'to': locations[1:], 'delta': raw_delta, time_col: times}
out = pd.DataFrame(dict([(k,pd.Series(v)) for k,v in df_dict.items()]))
out['date'] = out[time_col].dt.date
baseline_df = out.groupby(by='date')['delta'].apply(list).reset_index()
dates = baseline_df['date'].values
deltas = baseline_df['delta'].values
rp_col = []
for nd in range(dates.shape[0]):
date = dates[nd]
this_delta = deltas[nd]
index_baseline_end = np.where(dates <= date)[0][-1]
index_baseline_end = index_baseline_end - baseline_offset_days
index_baseline_start = index_baseline_end - baseline_length_days
if index_baseline_start < 0:
rp_col.extend([np.NAN]*len(this_delta))
else:
X_fit = np.hstack(deltas[index_baseline_start:index_baseline_end]).reshape(-1,1)
X_transform = np.asarray(this_delta).reshape(-1,1)
td = TimeDeltaDensity(sample = True, sample_size = 10000, seed = nd, verbose = False)
td.fit(X_fit)
rp_col.extend(td.transform(X_transform).reshape(-1,))
out['rp'] = rp_col
return out
else:
unique_locations = data[location_col].unique()
data['date'] = pd.to_datetime(data[time_col].dt.date)
rp_col = -1*np.ones(data.shape[0])
for location in unique_locations:
start_func = time.time()
delta_dict = rp_single_location_delta(input_df=data,
single_location=location,
baseline_length_days=7,
baseline_offset_days=0,
columns=columns,
recall_value=1)
end_func = time.time()
location_index = np.where(data[location_col] == location)[0]
for date in delta_dict:
deltas = delta_dict[pd.Timestamp(date)]
start_search = time.time()
index_add = location_index[np.where(data['date'].iloc[location_index] == | pd.Timestamp(date) | pandas.Timestamp |
import pandas as pd
from kf_pedigree.common import get_logger
from kf_pedigree.family import find_family_from_family_list
logger = get_logger(__name__, testing_mode=False)
def gender(x):
if isinstance(x, str):
if x.lower() == "male":
return "1"
elif x.lower() == "female":
return "2"
else:
return "0"
else:
return "0"
def case_control(row):
if row["is_proband"] or row["affected_status"]:
return "2"
elif row["is_proband"] is False:
return "1"
else:
return "0"
def parent(x):
if x != x:
return "0"
else:
return x
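# Hedged usage sketch (not part of the original module): the helpers above appear to
# map phenotype fields onto pedigree-style codes (gender: 1=male, 2=female, 0=unknown;
# case_control: 2=case, 1=control, 0=unknown; parent: NaN -> "0").
def _example_pedigree_codes():
    row = {"is_proband": False, "affected_status": True}
    # expected result: ("2", "0", "2", "0")
    return gender("Female"), gender(None), case_control(row), parent(float("nan"))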
def build_report(
participants,
family_relationships,
use_external_ids=False,
api_or_db_url=None,
):
logger.info("Building Pedigree Report")
family = participants[["kf_id", "family_id"]]
fr = (
family_relationships.merge(
family, left_on="participant1", right_on="kf_id", how="left"
)
.drop_duplicates()
.drop(columns=["kf_id"])
)
fr["participant1_to_participant2_relation"] = fr[
"participant1_to_participant2_relation"
].str.lower()
fr["participant2_to_participant1_relation"] = fr[
"participant2_to_participant1_relation"
].str.lower()
pedi_1_2 = (
fr[
fr["participant1_to_participant2_relation"]
.str.lower()
.isin(["mother", "father"])
]
.drop(columns=["participant2_to_participant1_relation"])
.set_index(
[
"family_id",
"participant2",
"participant1_to_participant2_relation",
]
)["participant1"]
.unstack()
.reset_index()
.rename(columns={"participant2": "pt_id"})
)
pedi_2_1 = (
fr[
fr["participant2_to_participant1_relation"]
.str.lower()
.isin(["child"])
][["participant1", "family_id"]]
.drop_duplicates()
.rename(columns={"participant1": "pt_id"})
)
participant_info = participants[
["kf_id", "affected_status", "is_proband", "gender"]
].rename(columns={"kf_id": "pt_id"})
pedigree = (
| pd.concat([pedi_1_2, pedi_2_1]) | pandas.concat |
import os
import numpy as np
import pandas as pd
from pyuplift.utils import download_file
def download_hillstrom_email_marketing(
data_home=None,
url='http://www.minethatdata.com/Kevin_Hillstrom_MineThatData_E-MailAnalytics_DataMiningChallenge_2008.03.20.csv'
):
"""Downloading the Hillstrom Email Marketing dataset.
****************
Data description
****************
This dataset contains 64,000 customers who last purchased within twelve months.
The customers were involved in an e-mail test.
* 1/3 were randomly chosen to receive an e-mail campaign featuring Mens merchandise.
* 1/3 were randomly chosen to receive an e-mail campaign featuring Womens merchandise.
* 1/3 were randomly chosen to not receive an e-mail campaign.
During a period of two weeks following the e-mail campaign, results were tracked.
Your job is to tell the world if the Mens or Womens e-mail campaign was successful.
+--------------------------+------------+
|Features | 8 |
+--------------------------+------------+
|Treatment | 3 |
+--------------------------+------------+
|Samples total | 64,000 |
+--------------------------+------------+
|Average spend rate | 1.05091 |
+--------------------------+------------+
|Average visit rate | 0.14678 |
+--------------------------+------------+
|Average conversion rate | 0.00903 |
+--------------------------+------------+
More information about dataset you can find in
the `official paper <http://minethatdata.com/Stochastic_Solutions_E-Mail_Challenge_2008.04.30.pdf>`_.
+-----------------+----------------------------------------------------------------------------------+
| **Parameters** | | **data_home: string** |
| | | Specify another download and cache folder for the dataset. |
| | | By default the dataset will be stored in the data folder in the same folder. |
| | | **url: string** |
| | | The URL to file with data. |
+-----------------+----------------------------------------------------------------------------------+
| **Returns** | **None** |
+-----------------+----------------------------------------------------------------------------------+
"""
data_home, dataset_path = __get_data_home_dataset_file_paths(data_home)
if not os.path.isdir(data_home):
os.makedirs(data_home)
if not os.path.exists(dataset_path):
download_file(url, dataset_path)
def load_hillstrom_email_marketing(
data_home=None,
load_raw_data=False,
download_if_missing=True
):
"""Loading the Hillstrom Email Marketing dataset from the local file.
****************
Data description
****************
This dataset contains 64,000 customers who last purchased within twelve months.
The customers were involved in an e-mail test.
* 1/3 were randomly chosen to receive an e-mail campaign featuring Mens merchandise.
* 1/3 were randomly chosen to receive an e-mail campaign featuring Womens merchandise.
* 1/3 were randomly chosen to not receive an e-mail campaign.
During a period of two weeks following the e-mail campaign, results were tracked.
Your job is to tell the world if the Mens or Womens e-mail campaign was successful.
+--------------------------+------------+
|Features | 8 |
+--------------------------+------------+
|Treatment | 3 |
+--------------------------+------------+
|Samples total | 64,000 |
+--------------------------+------------+
|Average spend rate | 1.05091 |
+--------------------------+------------+
|Average visit rate | 0.14678 |
+--------------------------+------------+
|Average conversion rate | 0.00903 |
+--------------------------+------------+
More information about dataset you can find in
the `official paper <http://minethatdata.com/Stochastic_Solutions_E-Mail_Challenge_2008.04.30.pdf>`_.
Parameters
----------
load_raw_data : bool, default: False
        Whether to load the raw (unencoded) data instead of the preprocessed data.
data_home : str, optional (default=None)
Specify another download and cache folder for the dataset.
By default the dataset will be stored in the data folder in the same folder.
download_if_missing : bool, optional (default=True)
Download the dataset if it is not downloaded.
Returns
-------
dataset : dict object with the following attributes:
dataset.description : str
Description of the Hillstrom email marketing dataset.
dataset.data : ndarray, shape (64000, 8)
Each row corresponding to the 8 feature values in order.
dataset.feature_names : list, size 8
List of feature names.
dataset.treatment : ndarray, shape (64000,)
Each value corresponds to the treatment.
dataset.target : numpy array of shape (64000,)
Each value corresponds to one of the outcomes. By default, it's `spend` outcome (look at `target_spend` below).
dataset.target_spend : numpy array of shape (64000,)
Each value corresponds to how much customers spent during a two-week outcome period.
dataset.target_visit : numpy array of shape (64000,)
Each value corresponds to whether people visited the site during a two-week outcome period.
dataset.target_conversion : numpy array of shape (64000,)
Each value corresponds to whether they purchased at the site (“conversion”) during a two-week outcome period.
"""
data_home, dataset_path = __get_data_home_dataset_file_paths(data_home)
if not os.path.exists(dataset_path):
if download_if_missing:
download_hillstrom_email_marketing(data_home)
else:
raise FileNotFoundError(
'The dataset does not exist. '
'Use `download_hillstrom_email_marketing` function to download the dataset.'
)
df = pd.read_csv(dataset_path)
if not load_raw_data:
df = __encode_data(df)
description = 'This dataset contains 64,000 customers who last purchased within twelve months. ' \
'The customers were involved in an e-mail test. ' \
'1/3 were randomly chosen to receive an e-mail campaign featuring Mens merchandise. ' \
'1/3 were randomly chosen to receive an e-mail campaign featuring Womens merchandise. ' \
'1/3 were randomly chosen to not receive an e-mail campaign. ' \
'During a period of two weeks following the e-mail campaign, results were tracked. ' \
'Your job is to tell the world if the Mens or Womens e-mail campaign was successful.'
drop_fields = ['spend', 'visit', 'conversion', 'segment']
data = {
'description': description,
'data': df.drop(drop_fields, axis=1).values,
'feature_names': np.array(list(filter(lambda x: x not in drop_fields, df.columns))),
'treatment': df['segment'].values,
'target': df['spend'].values,
'target_spend': df['spend'].values,
'target_visit': df['visit'].values,
'target_conversion': df['conversion'].values,
}
return data
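# Hedged usage sketch (not part of the original module): the keys accessed below
# ('data', 'feature_names', 'treatment', 'target') are exactly the ones assembled in
# load_hillstrom_email_marketing above; the CSV is downloaded on first use.
def _example_load_hillstrom():
    dataset = load_hillstrom_email_marketing(download_if_missing=True)
    print(dataset['data'].shape, len(dataset['feature_names']))
    print(dataset['treatment'][:5], dataset['target'][:5])
    return dataset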
def __encode_data(df):
df['history_segment'] = df['history_segment'].apply(lambda s: s.split(') ')[1])
col_name = 'zip_code'
df = | pd.get_dummies(df, columns=[col_name], prefix=col_name) | pandas.get_dummies |
import os
import trimesh
import numpy as np
import pandas as pd
from enum import Enum
from matplotlib import cm
from urdfpy import URDF, JointLimit
from tools.utils import io
# from tools.visualization import Viewer
# override attributes to make effort, velocity optional
JointLimit._ATTRIBS = {
'effort': (float, False),
'velocity': (float, False),
'lower': (float, False),
'upper': (float, False),
}
# set default values
JointLimit.effort = 1.0
JointLimit.velocity = 1000
class DatasetName(Enum):
SAPIEN = 0
SHAPE2MOTION = 1
MULTISCAN = 2
class JointType(Enum):
prismatic = 0
revolute = 1
fixed = -1
continuous = -1
floating = -1
planar = -1
def get_mesh_info(mesh_path):
mesh = trimesh.load(mesh_path, force='mesh')
assert isinstance(mesh, trimesh.base.Trimesh)
min_bound = mesh.bounds[0]
max_bound = mesh.bounds[1]
center = np.mean(mesh.bounds, axis=0)
scale = mesh.scale
mesh_info = {
'min_bound': min_bound.tolist(),
'max_bound': max_bound.tolist(),
'center': center.tolist(),
'scale': scale
}
return mesh_info
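# Hedged usage sketch (not part of the original module): get_mesh_info only needs a
# file trimesh can load, so a unit box is generated with trimesh itself and written
# to a temporary OBJ file purely for illustration.
def _example_get_mesh_info():
    import tempfile
    box = trimesh.creation.box(extents=(1.0, 2.0, 3.0))
    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_path = os.path.join(tmp_dir, 'box.obj')
        box.export(demo_path)
        # 'scale' is the bounding-box diagonal length, 'center' sits near the origin
        return get_mesh_info(demo_path)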
class DataLoader:
def __init__(self, cfg):
self.cfg = cfg
dataset_name = self.cfg.dataset.name
self.dataset_name = DatasetName[dataset_name] if isinstance(dataset_name, str) else dataset_name
self.dataset_dir = self.cfg.paths.preprocess.input_dir
self.stage1_input = self.cfg.paths.preprocess.stage1.input
self.render_dir = os.path.join(self.dataset_dir, self.stage1_input.render.folder_name)
self.motion_dir = os.path.join(self.dataset_dir, self.stage1_input.motion.folder_name)
self.data_info = pd.DataFrame()
def parse_input(self):
render_data_info = self.parse_render_input()
motion_data_info = self.parse_motion_input()
self.data_info = render_data_info.merge(motion_data_info, how='inner', on=['objectCat', 'objectId'])
selected_categories = self.data_info['objectCat'].isin(self.cfg.settings.categories) \
if len(self.cfg.settings.categories) > 0 else self.data_info['objectCat'].astype(bool)
selected_object_ids = self.data_info['objectId'].isin(self.cfg.settings.object_ids) \
if len(self.cfg.settings.object_ids) > 0 else self.data_info['objectId'].astype(bool)
selected_articulation_ids = self.data_info['articulationId'].isin(self.cfg.settings.articulation_ids) \
if len(self.cfg.settings.articulation_ids) > 0 else self.data_info['articulationId'].astype(bool)
self.data_info = self.data_info[selected_categories & selected_object_ids & selected_articulation_ids]
self.data_info = self.data_info.reset_index(drop=True)
def parse_render_input(self):
df_list = []
object_cats = os.listdir(self.render_dir)
# object categories
for object_cat in object_cats:
object_cat_path = os.path.join(self.render_dir, object_cat)
object_ids = io.alphanum_ordered_folder_list(object_cat_path)
# object instance ids
for object_id in object_ids:
object_id_path = os.path.join(object_cat_path, object_id)
articulation_ids = io.alphanum_ordered_folder_list(object_id_path)
# object with different articulations instance ids
for articulation_id in articulation_ids:
articulation_id_path = os.path.join(object_id_path, articulation_id)
depth_dir = os.path.join(articulation_id_path, self.stage1_input.render.depth_folder)
depth_frames = io.alphanum_ordered_file_list(depth_dir, ext=self.stage1_input.render.depth_ext)
mask_dir = os.path.join(articulation_id_path, self.stage1_input.render.mask_folder)
mask_frames = io.alphanum_ordered_file_list(mask_dir, ext=self.stage1_input.render.mask_ext)
metadata_file = self.stage1_input.render.metadata_file
num_renders = len(depth_frames)
df_row = pd.concat([pd.DataFrame(
[[object_cat, object_id, articulation_id, depth_frames[i], mask_frames[i],
metadata_file]],
columns=['objectCat', 'objectId', 'articulationId',
'depthFrame', 'maskFrame', 'metadata']) for i in range(num_renders)],
ignore_index=True)
df_list.append(df_row)
return | pd.concat(df_list, ignore_index=True) | pandas.concat |
import timeboard as tb
from timeboard.interval import Interval, _VoidInterval
from timeboard.workshift import Workshift
from timeboard.exceptions import (OutOfBoundsError, PartialOutOfBoundsError,
VoidIntervalError)
from timeboard.timeboard import _Location, OOB_LEFT, OOB_RIGHT, LOC_WITHIN
import datetime
import pandas as pd
import numpy as np
import pytest
def tb_12_days():
return tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0])
# 31 01 02 03 04 05 06 07 08 09 10 11 12
# 0 1 0 0 1 0 0 1 0 0 1 0 0
class TestIntervalLocatorFromReference(object):
def test_interval_locator_default(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
None, False, False) == [_Location(0, LOC_WITHIN),
_Location(12, LOC_WITHIN)]
def test_interval_locator_with_two_ts(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00', '08 Jan 2017 15:00'), False, False) == [
_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
# reverse is ok; it is taken care later in 'get_interval'
assert clnd._get_interval_locs_from_reference(
('08 Jan 2017 15:00', '02 Jan 2017 15:00'), False, False) == [
_Location(8, LOC_WITHIN), _Location(2, LOC_WITHIN)]
    def test_interval_locator_with_excessive_item(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00','08 Jan 2017 15:00','something'), False,
False) == [_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
def test_interval_locator_with_two_pd_ts(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
(pd.Timestamp('02 Jan 2017 15:00'),
| pd.Timestamp('08 Jan 2017 15:00') | pandas.Timestamp |
import os
import uuid
from datetime import datetime
import pathlib
import shutil
from send2trash import send2trash
from bs4 import (BeautifulSoup, Comment)
import lxml  # not necessarily used directly, but related to the parsers bs4 relies on; kept as a reminder to pre-install it
import pandas as pd
import re
from wordcloud import WordCloud
import jieba
NOTEINDEXCOLS= ["type","title","path","ctime","mtime","atime"
,"url","att_list","att_num","keywords"]
def getDir(dir):
d = os.path.abspath(dir)
if not os.path.exists(d):
os.makedirs(d) # 不同于mkdir makedirs方法可以巢式新建文件夹
return d
class Note():
"""
:::由于混在一起设计了稍微写下省的犯错:::
self.root = 笔记库根目录
self.index = 索引表本身
self.index_cols = 索引中的格列标题
self.index_path = 索引的保存路径
self.index_update_time = 索引的更新时间
self.id = id
# self.ext = 扩展名
self.title = 标题
self.type = 分类
self.keywords = 关键词
self.path = 笔记文件夹的路径 os.path.join(self.root, f"{self.type}/{self.id}/")
self.url = 外链
self.atime = atime access?
self.ctime = ctime create?
self.mtime = mtime modify
self.content = 内容本身
self.content_name = 内容的文件名 f"{self.title}.html"
self.content_path = 内容的保存路径
self.info = info内容本身
self.info_path = info的保存路径
self.att_path = 附件文件夹路径
"""
def __init__(self, root):
self.root = root
self.index_cols = NOTEINDEXCOLS
for d in ["Index","Draft","Archive","Trash"]:
getDir(os.path.join(root, d))
self.index_path = os.path.join(root, "Index/Note_index.json")
self.createIndex()
# index ########################
def createIndex(self):
if os.path.exists(self.index_path):
self.readIndex()
else:
self.index = pd.DataFrame(columns=self.index_cols)
self.writeIndex()
def writeIndex(self):
try:
            self.index.to_json(self.index_path, orient='split')  # orient='split' preserves the row/column order when the JSON is read back
            #self.index.to_json(self.index_path)
        except Exception as e:
            print("Error saving index:", e)
def readIndex(self):
try:
self.index = pd.read_json(self.index_path, convert_dates=["atime","ctime","mtime"], orient='split')
#self.index = pd.read_json(self.index_path, convert_dates=["atime","ctime","mtime"])
except Exception as e:
print("索引表读取出错:", e)
def archiveIndex(self):
archive_dir = getDir(os.path.join(self.root, "Index/Note_archive"))
tstmp = str(datetime.timestamp(datetime.now()))
archive_name = f"({tstmp})".join(os.path.splitext(os.path.basename(self.index_path)))
#if not os.path.exists(self.index):
        # adds robustness in case the index file was accidentally deleted
try:
shutil.copy(self.index_path, os.path.join(archive_dir, archive_name))
except Exception as e:
print("索引表存档出错:", e)
else:
self.createIndex()
def getUpdateTime(self):
self.index_update_time = datetime.fromtimestamp(os.path.getmtime(self.index_path))
# def sortIndex(self, mode):
# ds = self.index.loc[self.index['type']==mode].copy()
# ds.sort_values(by='title', inplace=True)
# return ds
# 内容 ########################
def createContent(self):
self.content = f"<!DOCTYPE html><html><meta charset='utf-8'><head><title>{self.title}</title></head>"
        self.content += f'<body>File created at {datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}, start writing here.</body></html>'
def writeContent(self):
try:
f = open(self.content_path, "w", encoding="utf-8")
f.write(self.content)
except Exception as e:
print("内容写入出错:", e)
f.close()
else:
f.close()
def readContent(self):
codecs= ("utf-8", "gb18030", "ASCII")
i = 0
for codec in codecs:
try:
f = open(self.content_path, "r", encoding=codec)
self.content = f.read()
except UnicodeDecodeError:
print("{}按{}读取错误".format(self.content_path, codec))
self.content = ""
else:
# print("按{}读取成功".format(codec)
i = 1
f.close()
if i==1:
break
if not self.content:
print("未能成功读取HTML文件:", self.content_path)
# info ##########################
def createInfo(self):
self.readAtt()
self.keywords = ";".join(re.split("[;;|,,]", self.keywords)) # 规整关键词分隔符
self.info = pd.Series([self.type, self.title, self.path, self.ctime, self.mtime, self.atime\
,self.url, self.att_list, self.att_num, self.keywords]
,index=self.index_cols)
def writeInfo(self):
self.createInfo()
self.info_path = os.path.join(self.path, "{}.info".format(os.path.splitext(self.content_name)[0]))
try:
self.info.to_json(self.info_path)
except Exception as e:
print("信息写入出错:", e)
def readInfo(self):
#self.info = pd.read_json(self.info_path, typ="Series")
self.info = | pd.read_json(self.info_path, typ="Series", convert_dates=["atime","ctime","mtime"]) | pandas.read_json |
"""test_ulogconv."""
from context import mathpandas as mpd
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal
def test_norm_2d():
"""test pythagoras series."""
x = pd.Series([1, 2, 3, 4])
y = pd.Series([2, 3, 4, 5])
r = mpd.get_series_norm_2d(x, y, "test")
assert_almost_equal(r.iloc[0], 2.23606797749979)
assert_almost_equal(r.iloc[1], 3.605551275463989)
assert_almost_equal(r.iloc[2], 5.0)
assert_almost_equal(r.iloc[3], 6.4031242374328485)
assert r.name == "test_norm"
def test_tilt_from_attitude():
"""test tilt series."""
q0 = pd.Series([1]) # w
q1 = | pd.Series([0]) | pandas.Series |
# -*- coding: utf-8 -*-
import os
import dash
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
#a
app = dash.Dash(__name__)
server = app.server
people = | pd.read_csv("BNOCSFINAL1.csv") | pandas.read_csv |
import os, sys
import collections
import pprint
import pandas as pd
import pysam
class Call:
def __init__(self, call, quality = None, is_error = False):
self.call = call
self.quality = quality
self.is_error = is_error
self.is_indel = len(call) > 1
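# Hedged usage sketch (not part of the original script): Call is a small value
# object; multi-base calls are flagged as indels and '_'-prefixed calls are errors.
def _example_call():
    snv = Call('A', quality=37)
    ins = Call('GAT', quality=30)
    err = Call('_MAPQ', is_error=True)
    # expected: (False, True, True)
    return snv.is_indel, ins.is_indel, err.is_error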
def get_call_for_pileup_read(pileup_read, ref = None):
if pileup_read.alignment.mapping_quality < 20:
return Call('_MAPQ', is_error = True)
elif pileup_read.alignment.is_secondary or pileup_read.alignment.is_supplementary or pileup_read.alignment.is_qcfail:
return Call('_FLAG', is_error = True)
elif pileup_read.alignment.is_duplicate:
return Call('_DUPE', is_error = True)
elif pileup_read.indel > 0:
quals = pileup_read.alignment.query_qualities[pileup_read.query_position:pileup_read.query_position+pileup_read.indel+1]
return Call(
pileup_read.alignment.query_sequence[pileup_read.query_position:pileup_read.query_position+pileup_read.indel+1],
1.0 * sum(quals) / len(quals)
)
elif pileup_read.indel < 0:
#print(ref, pileup_read.indel, len(ref), ref[0:-abs(pileup_read.indel)])
#if abs(pileup_read.indel) < len(ref):
# return ref[0:-abs(pileup_read.indel)]
#else:
# return '_DEL'
#hacky way to handle deletions...
return Call(
'%s-%d' % (pileup_read.alignment.query_sequence[pileup_read.query_position], abs(pileup_read.indel)),
sum(pileup_read.alignment.query_qualities[pileup_read.query_position:pileup_read.query_position+2]) / 2.0
)
elif pileup_read.is_del:
return Call('_DEL', is_error = True)
elif pileup_read.is_refskip:
return Call('_SKIP', is_error = True)
else:
return Call(pileup_read.alignment.query_sequence[pileup_read.query_position], pileup_read.alignment.query_qualities[pileup_read.query_position])
def get_read_calls(samfile, chrom, pos1, ref = None, max_depth = 1e7, calculate_stats = False):
read_calls = {}
read_stats = {}
for pileup_column in samfile.pileup(chrom, pos1-1, pos1, max_depth = max_depth, stepper = 'nofilter', truncate=True):
print(chrom, pos1, '->', pileup_column.reference_name, pileup_column.reference_pos, ' - found ', pileup_column.nsegments, 'alignments')
for pileup_read in pileup_column.pileups:
#ignore secondary and supplementary alignments
if pileup_read.alignment.is_secondary:
continue
if pileup_read.alignment.is_supplementary:
continue
assert not pileup_read.alignment.query_name in read_calls, 'encountered multiple alignments for single read?'
read_calls[pileup_read.alignment.query_name] = get_call_for_pileup_read(pileup_read, ref)
if calculate_stats:
read_stats[pileup_read.alignment.query_name] = {
'length': pileup_read.alignment.infer_query_length(),
'mismatches': pileup_read.alignment.get_tag('NM'),
'mapping_quality': pileup_read.alignment.mapping_quality,
'mean_baseq': 1.0 * sum(pileup_read.alignment.query_qualities) / len(pileup_read.alignment.query_qualities)
}
if calculate_stats:
return read_calls, read_stats
else:
return read_calls
"Get counts of how often each call was observed at each SNP"
def get_call_counts(samfile, snps):
snp_call_counts = {}
for snp in snps.itertuples():
snp_call_counts[snp.name] = collections.Counter()
for pileup_column in samfile.pileup(snp.CHROM, snp.POS-1, snp.POS, max_depth = 1e4, stepper = 'nofilter', truncate=True):
for pileup_read in pileup_column.pileups:
call = get_call_for_pileup_read(pileup_read) #, snp.REF
snp_call_counts[snp.name][call.call] += 1
return snp_call_counts
def get_allele_type(allele, snp):
if allele in snp.paternal and allele in snp.maternal:
return 'shared'
elif allele in snp.maternal:
return 'maternal'
elif allele in snp.paternal:
return 'paternal'
else:
return None
def get_mutation_allele_type(allele, mutation):
if allele == mutation['REF_processed']:
return 'wild-type'
elif allele == mutation['ALT_processed']:
return 'mutation'
else:
return None
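# Hedged usage sketch (not part of the original script): get_allele_type only looks
# at the 'maternal' and 'paternal' attributes of the snp record, so a namedtuple with
# set-valued fields (an assumption made just for this demo) stands in for the real row.
def _example_get_allele_type():
    Snp = collections.namedtuple('Snp', ['maternal', 'paternal'])
    snp = Snp(maternal={'A', 'G'}, paternal={'G'})
    # 'G' is in both -> 'shared'; 'A' only maternal -> 'maternal'; 'T' in neither -> None
    return get_allele_type('G', snp), get_allele_type('A', snp), get_allele_type('T', snp)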
def process_family(fam, fam_rows, bam_mask, snps_mask, subsample = None):
print()
print(fam, 'STARTING')
assert 'proband' in fam_rows['relationship'].values, 'need at least one proband'
snps_fn = snps_mask % fam
if not os.path.isfile(snps_fn):
raise Exception('%s: %s missing!' % (fam, snps_fn))
snps = pd.read_csv(snps_fn, sep='\t', dtype={'#CHROM': str, 'POS': int, 'ID': str, 'REF': str, 'ALT': str})
snps.rename(columns = { '#CHROM': 'CHROM' }, inplace=True)
snps.loc[~snps['CHROM'].str.startswith('chr'), 'CHROM'] = ['chr' + c for c in snps.loc[~snps['CHROM'].str.startswith('chr'), 'CHROM']]
snps['name'] = ['%s_%d' % (snp.CHROM, snp.POS) for snp in snps.itertuples()]
#the "calls" we actually get back from the pileup are just a single base, so if we have a deletion
#such as GA>G, what we actually see if a G with indel == 0 or indel == 1.
#thus, we need to adjust the REF/ALT we actually expect to see
#this is stupid, what we should really do is to process the pileups in a smarter way...
snps['REF_processed'] = [snp.REF[0] if len(snp.REF) > 1 else snp.REF for snp in snps.itertuples()]
snps['ALT_processed'] = ['%s-%d' % (snp.REF[0], len(snp.REF) - len(snp.ALT)) if len(snp.REF) > 1 else snp.ALT for snp in snps.itertuples()]
print(snps)
mutation = snps[snps.ID == 'mutation']
assert len(mutation) == 1, 'only one mutation allowed'
mutation = mutation.iloc[0]
background_snps_list = []
for offset in [10, 50, 100]:
for sign in [1, -1]:
background_snps_list.append(
pd.DataFrame([
{
'CHROM': snp.CHROM,
'POS': snp.POS + sign * offset,
'name': '{}_{}'.format(snp.name, sign * offset)
} for snp in snps.itertuples()
])
)
background_snps = | pd.concat(background_snps_list) | pandas.concat |
"""
The :mod:`hillmaker.bydatetime` module includes functions for computing occupancy,
arrival, and departure statistics by time bin of day and date.
"""
# Copyright 2022 <NAME>
#
import logging
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas import Series
from pandas import Timestamp
from datetime import datetime
from pandas.tseries.offsets import Minute
import hillmaker.hmlib as hmlib
CONST_FAKE_OCCWEIGHT_FIELDNAME = 'FakeOccWeightField'
CONST_FAKE_CATFIELD_NAME = 'FakeCatForTotals'
OCC_TOLERANCE = 0.02
# This should inherit level from root logger
logger = logging.getLogger(__name__)
def make_bydatetime(stops_df, infield, outfield,
start_analysis_np, end_analysis_np, catfield=None,
bin_size_minutes=60,
cat_to_exclude=None,
totals=1,
occ_weight_field=None,
edge_bins=1,
verbose=0):
"""
Create bydatetime table based on user inputs.
This is the table from which summary statistics can be computed.
Parameters
----------
stops_df: DataFrame
Stop data
infield: string
Name of column in stops_df to use as arrival datetime
outfield: string
Name of column in stops_df to use as departure datetime
start_analysis_np: numpy datetime64[ns]
Start date for the analysis
end_analysis_np: numpy datetime64[ns]
End date for the analysis
catfield : string, optional
Column name corresponding to the categories. If none is specified, then only overall occupancy is analyzed.
bin_size_minutes: int, default 60
Bin size in minutes. Should divide evenly into 1440.
cat_to_exclude: list of strings, default None
Categories to ignore
edge_bins: int, default 1
Occupancy contribution method for arrival and departure bins. 1=fractional, 2=whole bin
totals: int, default 1
0=no totals, 1=totals by datetime, 2=totals bydatetime as well as totals for each field in the
catfields (only relevant for > 1 category field)
occ_weight_field : string, optional (default=1.0)
Column name corresponding to the weights to use for occupancy incrementing.
verbose : int, default 0
The verbosity level. The default, zero, means silent mode.
Returns
-------
Dict of DataFrames
Occupancy, arrivals, departures by category by datetime bin
Examples
--------
bydt_dfs = make_bydatetime(stops_df, 'InTime', 'OutTime',
... datetime(2014, 3, 1), datetime(2014, 6, 30), 'PatientType', 60)
TODO
----
* Sanity checks on date ranges
* Formal test using short stay data
* Flow conservation checks
Notes
-----
References
----------
See Also
--------
"""
# Number of bins in analysis span
num_bins = hmlib.bin_of_span(end_analysis_np, start_analysis_np, bin_size_minutes) + 1
# Compute min and max of in and out times
min_intime = stops_df[infield].min()
max_intime = stops_df[infield].max()
min_outtime = stops_df[outfield].min()
max_outtime = stops_df[outfield].max()
logger.info(f"min of intime: {min_intime}")
logger.info(f"max of intime: {max_intime}")
logger.info(f"min of outtime: {min_outtime}")
logger.info(f"max of outtime: {max_outtime}")
# TODO - Add warnings here related to min and maxes out of whack with analysis range
# Occupancy weights
# If no occ weight field specified, create fake one containing 1.0 as values.
# Avoids having to check during dataframe iteration whether or not to use
# default occupancy weight.
if occ_weight_field is None:
occ_weight_vec = np.ones(len(stops_df.index), dtype=np.float64)
occ_weight_df = DataFrame({CONST_FAKE_OCCWEIGHT_FIELDNAME: occ_weight_vec})
stops_df = | pd.concat([stops_df, occ_weight_df], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import itertools
import math
import re
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
plt.style.use('seaborn-white')
class MAM:
"""
MAM (Marketing Attribution Models) is a class inspired on the R Package ‘GameTheoryAllocation’ from <NAME>
and ‘ChannelAttribution’ from <NAME> and <NAME> that was created to bring these concepts to
Python and to help us understand how the different marketing channels behave during the customer journey.
Parameters:
df = None by default, but should only be None if choosing to use a random dataframe. Otherwise,
it has to receive a Pandas dataframe;
    time_till_conv_colname = None by default. Column name in the df containing the time in hours until
                              the moment of the conversion. The column must have the same elements as the
                              channels_colname has.
                              Values can be in a list or in a string with a separator;
    conversion_value = 1 by default. Integer that represents the monetary value of a 'conversion'; it can
                       also receive a string indicating the column name in the dataframe containing the
                       conversion values;
    channels_colname = None by default. Column name in the df containing the different channels during the
                       customer journey. The column must have the same elements as the time_till_conv_colname
                       has.
                       Values can be in a list or in a string with a separator;
    journey_with_conv_colname = None by default.
    group_channels = False by default. The most important parameter of this class: it indicates the input
                      format of the dataframe.
                      True = Each row represents a user session that will be grouped
                            into a user journey;
                      False = Each row already represents a complete user journey, with the channels
                            (and, optionally, the times) stored as lists or separator-joined strings;
    group_channels_by_id_list = Empty list by default.
    group_timestamp_colname = None by default.
    create_journey_id_based_on_conversion = False by default.
    path_separator = ' > ' by default. If using 'group_channels = True', this should match the separator
                      being used in the input dataframe in the channels_colname;
    verbose = False by default. Internal parameter for printing while working with MAM;
    random_df = False by default. Will create a random dataframe for testing purposes;
"""
def __init__(
self,
df=None,
time_till_conv_colname=None,
conversion_value=1,
channels_colname=None,
journey_with_conv_colname=None,
group_channels=False,
group_channels_by_id_list=[],
group_timestamp_colname=None,
create_journey_id_based_on_conversion = False,
path_separator=' > ',
verbose=False,
random_df=False):
self.verbose = verbose
self.sep = path_separator
self.group_by_channels_models = None
##########################################################
##### Section 0: Funcions needed to create the class #####
##########################################################
def journey_id_based_on_conversion(df,
group_id,
transaction_colname):
"""
            Internal function that creates a journey_id column in a DF that contains a user ID column and a
            Boolean column indicating whether a conversion happened on that row.
"""
df_temp = df.copy()
for i in group_id:
df_temp[i] = df_temp[i].apply(str)
#Converting bool column to int
df_temp['journey_id'] = df_temp[transaction_colname].map(lambda x: 0 if x == False else 1)
#Cumsum for each transaction to expand the value for the rows that did not have a transaction
df_temp['journey_id'] = df_temp.groupby(group_id)['journey_id'].cumsum()
#Subtracting 1 only for the row that had a transaction
t = df_temp['journey_id'] - 1
df_temp['journey_id'] = df_temp['journey_id'].where((df_temp[transaction_colname] == False), t).apply(str)
df_temp['journey_id'] = 'id:' + df_temp[group_id[0]] + '_J:' + df_temp['journey_id']
del t
return df_temp
def random_mam_data_frame(user_id = 300, k = 50000, conv_rate = 0.4):
import random
channels = ['Direct', 'Direct', 'Facebook', 'Facebook', 'Facebook',
'Google Search', 'Google Search', 'Google Search', 'Google Search', 'Google Display',
'Organic', 'Organic', 'Organic', 'Organic', 'Organic', 'Organic',
'Email Marketing', 'Youtube', 'Instagram']
has_transaction = ([True] * int(conv_rate * 100)) + ([False] * int((1 - conv_rate) * 100))
            user_id = list(range(0, user_id))  # pool of user ids, size taken from the user_id parameter
day = range(1, 30)
month = range(1, 12)
res = []
for i in [channels,has_transaction, user_id, day, month]:
res.append(random.choices(population=i, k=k))
df = | pd.DataFrame(res) | pandas.DataFrame |
import pandas as pd
import numpy as np
def append_times(df, st, et):
df.insert(0, 'START_TIME', st)
df.insert(1, 'STOP_TIME', et)
df = df.set_index(['START_TIME', 'STOP_TIME'])
return df
def offset(df, offset_in_secs, start_time_col=0, stop_time_col=None):
df_copy = df.copy(deep=True)
if start_time_col is not None:
start_time_col = df_copy.columns[start_time_col]
df_copy[start_time_col] = df_copy[start_time_col] + \
pd.Timedelta(offset_in_secs, unit='s')
if stop_time_col is not None:
stop_time_col = df_copy.columns[stop_time_col]
df_copy[stop_time_col] = df_copy[stop_time_col] + \
pd.Timedelta(offset_in_secs, unit='s')
return df_copy
def segment(df, start_time=None, stop_time=None, start_time_col=0,
stop_time_col=None):
if stop_time_col is None:
stop_time_col = start_time_col
if start_time is None:
start_time = df.iloc[0, start_time_col]
if stop_time is None:
stop_time = df.iloc[-1, stop_time_col]
if start_time_col == stop_time_col:
mask = (df.iloc[:, start_time_col] >= start_time) & (
df.iloc[:, stop_time_col] < stop_time)
return df[mask].copy(deep=True)
else:
mask = (df.iloc[:, start_time_col] <= stop_time) & (
df.iloc[:, stop_time_col] >= start_time)
subset_df = df[mask].copy(deep=True)
start_time_col = df.columns[start_time_col]
stop_time_col = df.columns[stop_time_col]
subset_df.loc[subset_df.loc[:, start_time_col] <
start_time, start_time_col] = start_time
subset_df.loc[subset_df.loc[:, stop_time_col] >
stop_time, stop_time_col] = stop_time
return subset_df
def segment_sensor(df, start_time=None, stop_time=None):
return segment(df, start_time=start_time, stop_time=stop_time)
def segment_annotation(df, start_time=None, stop_time=None):
return segment(df, start_time=start_time, stop_time=stop_time,
start_time_col=1, stop_time_col=2)
def start_time(df, start_time_col=0):
return df.iloc[0, start_time_col]
def end_time(df, stop_time_col=0):
return df.iloc[-1, stop_time_col]
def get_annotation_labels(df):
labels = df.iloc[:, 3].unique()
return np.sort(labels)
def append_edges(df, before_df=None, after_df=None, duration=120,
start_time_col=0, stop_time_col=0):
lbound_time = df.iloc[0, start_time_col]
rbound_time = df.iloc[-1, stop_time_col]
if before_df is not None:
ledge_df = segment(before_df,
start_time=lbound_time -
pd.Timedelta(duration, unit='s'),
stop_time=lbound_time,
start_time_col=start_time_col,
stop_time_col=stop_time_col)
else:
ledge_df = pd.DataFrame()
if after_df is not None:
redge_df = segment(after_df,
start_time=rbound_time,
stop_time=rbound_time +
pd.Timedelta(duration, unit='s'),
start_time_col=start_time_col,
stop_time_col=stop_time_col)
else:
redge_df = pd.DataFrame()
return | pd.concat((ledge_df, df, redge_df)) | pandas.concat |
import os
import glob
import numpy as np
import pylab as pl
import scipy.io as sio
# for_Jyotika.m
from copy import copy, deepcopy
import pickle
import matplotlib.cm as cm
import pdb
import h5py
import pandas as pd
import bct
from collections import Counter
import sys
import seaborn as sns
import scipy.stats as sp_stats
sys.path.append("./common/")
import analyze as anal
data_dir = "./data/"
data_target_dir = "./data/"
fig_target_dir = "./Figure2/"
Fig2_panel_name = dict({"modularity_index":"H","participation_pos":"I","module_degree_zscore":"J","local_assortativity_pos_whole":"K"})
subtype = sys.argv[1]
ipsi_contra = sys.argv[2]
if subtype == "subtype":
if ipsi_contra == "n":
graph_prop_df = pd.read_csv(data_dir+"graph_properties_pandas_all.csv")
graph_prop_df_null = pd.read_csv(data_dir+"graph_properties_pandas_null_all.csv")
prop_names = ["data_type","modularity_index","participation_pos","module_degree_zscore","local_assortativity_pos_whole","gamma","names"]+[subtype]
non_string_columns = [ "modularity_index","participation_pos","module_degree_zscore","local_assortativity_pos_whole"]
elif ipsi_contra == "y":
graph_prop_df = pd.read_csv(data_dir+"graph_properties_pandas_sub_contra_ipsi_all.csv")
graph_prop_df_null = pd.read_csv(data_dir+"graph_properties_pandas_sub_contra_ipsi_all_null.csv")
prop_names = ["data_type","modularity_index","participation_pos_ipsi","participation_pos_contra","module_degree_zscore_ipsi","module_degree_zscore_contra","local_assortativity_pos_ipsi","local_assortativity_pos_contra","gamma","names"]+[subtype]
non_string_columns = [ "modularity_index","participation_pos_ipsi","participation_pos_contra","module_degree_zscore_ipsi","module_degree_zscore_contra","local_assortativity_pos_ipsi","local_assortativity_pos_contra"]
#elif ipsi_contra == "y":
# graph_properties_with_behavior_pandas_sub_ipsi_contra_all
elif subtype == "development":
graph_prop_df = pd.read_csv(data_dir+"graph_properties_pandas_days_all.csv")
graph_prop_df_null = | pd.read_csv(data_dir+"graph_properties_pandas_days_null_all.csv") | pandas.read_csv |
"""<NAME>., 2019 - 2020. All rights reserved."""
import os
import sys
import unittest
from unittest import mock
from io import StringIO
from test.test_support import TestResource
import pandas as pd
from pandas.util.testing import assert_frame_equal
from eaglevision.similarity_eagle import SimilarityEagle
class SimilarityEagleTestCase(unittest.TestCase):
""" Class to test the Similarity_eagle.py"""
@classmethod
def tearDown(cls):
filelist = [temp_f for temp_f in os.listdir(os.path.join(TestResource.report, "pattern_and_similarity_report"))]
for temp_f in filelist:
os.remove(os.path.join(TestResource.report, "pattern_and_similarity_report", temp_f))
def setUp(self):
""" Function used to setup the read the console out """
self.held, sys.stdout = sys.stdout, StringIO()
@staticmethod
def dummy_dataf():
""" Function which creates a dummy data frame for testing"""
data_f = {
"Uniq ID": [".cpp_BrowserProcessHandler", "cpp_BrowserProcessHandler"],
"Code": ["BrowserProcessHandler::BrowserProcessHandler(jobject app_handler) \n{\n\
assertequal = app_handler\n}", "BrowserProcessHandler::~BrowserProcessHandler() {\
base::AutoLock\
lock_scope(router_cfg_lock_)\
router_cfg_.clear()}"]
}
dataf = pd.DataFrame(data_f, columns=['Uniq ID', 'Code'])
return dataf
@staticmethod
def empty_dataf():
""" Function for testing purpose returns an empty dataframe"""
return pd.DataFrame()
def test__code_extraction__empty_dataframe_path(self):
""" Function to test the report folder creation, empty extraction handling """
similarityobj = SimilarityEagle()
self.assertEqual(similarityobj.dataframe, None)
self.assertEqual(similarityobj.report_path, None)
TestResource.input_json["run_similarity"] = False
mocked_class = mock.Mock()
mocked_class.return_value = self.empty_dataf()
with mock.patch('functiondefextractor.core_extractor.extractor', mocked_class):
similarityobj.orchestrate_similarity(TestResource.input_json)
out_str = (sys.stdout.getvalue().split('\n'))
matches = [c for c in out_str if 'No functions are extracted. Data frame is empty. Recheck your ' \
'input arguments' in c]
if len(list(filter(None, matches))): # pylint: disable= C1801
self.assertEqual(list(filter(None, matches))[0], 'No functions are extracted. Data frame is empty. '
'Recheck your input arguments')
else:
self.assertTrue(len(list(filter(None, matches))),
"mock is not called, no print seen") # pylint: disable= C1801
self.assertEqual(similarityobj.report_path, os.path.join(TestResource.report,
"pattern_and_similarity_report"))
self.assertTrue(mocked_class.called)
def test__code_extraction_non_empty_df(self):
""" Function to test the report folder creation, report file creation, non empty extraction handling """
similarityobj = SimilarityEagle()
TestResource.input_json["run_similarity"] = False
mocked_class = mock.Mock()
mocked_class.return_value = self.dummy_dataf()
with mock.patch('functiondefextractor.core_extractor.extractor', mocked_class):
similarityobj.orchestrate_similarity(TestResource.input_json)
out_str = (sys.stdout.getvalue().split('\n'))
matches = [c for c in out_str if
c in 'No functions are extracted. Data frame is empty. Recheck your input arguments']
self.assertEqual(len(list(filter(None, matches))), 0)
self.assertTrue(mocked_class.called)
self.assertEqual(similarityobj.report_path,
os.path.join(TestResource.report, "pattern_and_similarity_report"))
self.assertTrue(os.path.isfile(TestResource.get_result_file_name(
"pattern_and_similarity_report", "assertPivot")))
self.assertTrue(os.path.isfile(TestResource.get_result_file_name(
"pattern_and_similarity_report", "assert_pattern")))
actual_dataframe = pd.read_html(TestResource.get_result_file_name(
"pattern_and_similarity_report", "assertPivot"))
expected_dataframe = pd.read_html(os.path.join(TestResource.tst_resource_folder, "golden_assertPivot.html"))
assert_frame_equal(actual_dataframe[0], expected_dataframe[0], "check the assertion on html")
actual_dataframe = pd.read_excel(TestResource.get_result_file_name(
"pattern_and_similarity_report", "assert_pattern"), index_col=0)
expected_dataframe = pd.read_excel(os.path.join(TestResource.tst_resource_folder,
"golden_assert_pattern.xlsx"), index_col=0)
self.assertTrue(actual_dataframe['Uniq ID'].equals(expected_dataframe['Uniq ID']))
self.assertTrue(actual_dataframe['Code'].equals(expected_dataframe['Code']))
self.assertTrue(actual_dataframe['Count of assert in function'].
equals(expected_dataframe['Count of assert in function']))
actual_dataframe['assert Statements'].replace(to_replace=[r"\\t|\\n|\\r", "\t|\n|\r"],
value=["", ""], regex=True, inplace=True)
expected_dataframe['assert Statements'].replace(to_replace=[r"\\t|\\n|\\r", "\t|\n|\r"],
value=["", ""], regex=True, inplace=True)
self.assertTrue(actual_dataframe['assert Statements'].equals(expected_dataframe['assert Statements']))
def test__code_extraction_non_empty_df_no_pattern(self):
""" Function to test the non empty extraction handling with out pattern an no similarity check"""
similarityobj = SimilarityEagle()
mocked_class = mock.Mock()
mocked_class.return_value = self.dummy_dataf()
with mock.patch('functiondefextractor.core_extractor.extractor', mocked_class):
TestResource.input_json["pattern_match"] = None
TestResource.input_json["run_similarity"] = False
similarityobj.orchestrate_similarity(TestResource.input_json)
out_str = (sys.stdout.getvalue().split('\n'))
matches = [c for c in out_str if
'The pattern input is expected to be list and should be of same length as pattern ' \
'separators' in c]
self.assertEqual(len(list(filter(None, matches))), 1)
self.assertTrue(mocked_class.called)
self.assertFalse(os.path.isfile(os.path.join(TestResource.report, "pattern_and_similarity_report",
"assertPivot.html")))
self.assertFalse(os.path.isfile(os.path.join(TestResource.report, "pattern_and_similarity_report",
"assert_pattern.xlsx")))
def test__code_extraction_non_empty_df_pattern_and_separator(self):
""" Function to test the non empty extraction handling with out pattern and pattern separator and no similarity
check"""
similarityobj = SimilarityEagle()
mocked_class = mock.Mock()
mocked_class.return_value = self.dummy_dataf()
with mock.patch('functiondefextractor.core_extractor.extractor', mocked_class):
TestResource.input_json["pattern_match"] = ["assert", "print"]
TestResource.input_json["pattern_seperator"] = ["(", None]
TestResource.input_json["run_similarity"] = False
similarityobj.orchestrate_similarity(TestResource.input_json)
self.assertTrue(mocked_class.called)
self.assertTrue(os.path.isfile(TestResource.get_result_file_name(
"pattern_and_similarity_report", "assertPivot")))
self.assertTrue(os.path.isfile(TestResource.get_result_file_name(
"pattern_and_similarity_report", "printPivot")))
self.assertTrue(os.path.isfile(TestResource.get_result_file_name(
"pattern_and_similarity_report", "assert_pattern")))
self.assertTrue(os.path.isfile(TestResource.get_result_file_name(
"pattern_and_similarity_report", "print_pattern")))
def test_similarity_check_with_mock_extraction(self):
""" Function to test the similarity check"""
similarityobj = SimilarityEagle()
mocked_class = mock.Mock()
mocked_class.return_value = self.dummy_dataf()
with mock.patch('functiondefextractor.core_extractor.extractor', mocked_class):
TestResource.input_json["pattern_match"] = None
TestResource.input_json["run_similarity"] = True
TestResource.input_json["similarity_range"] = "0,100"
similarityobj.orchestrate_similarity(TestResource.input_json)
self.assertTrue(mocked_class.called)
self.assertEqual(similarityobj.report_path, os.path.join(TestResource.report,
"pattern_and_similarity_report"))
self.assertTrue(os.path.isfile(TestResource.get_result_file_name(
"pattern_and_similarity_report", "similarity_recommendation_0")))
self.assertTrue(os.path.isfile(TestResource.get_result_file_name(
"pattern_and_similarity_report",
"similarity_brief_report")))
actual_dataframe = pd.read_excel(TestResource.get_result_file_name(
"pattern_and_similarity_report", "similarity_recommendation_0"), index_col=0)
expected_dataframe = pd.read_excel(os.path.join(TestResource.tst_resource_folder,
"golden_similarity_recommendation_0.xlsx"), index_col=0)
self.assertTrue(actual_dataframe.equals(expected_dataframe))
actual_dataframe = pd.read_html(TestResource.get_result_file_name("pattern_and_similarity_report",
"similarity_brief_report"))
expected_dataframe = pd.read_html(os.path.join(TestResource.tst_resource_folder,
"golden_similarity_brief_report.html"))
| assert_frame_equal(actual_dataframe[0], expected_dataframe[0]) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/python
import warnings
warnings.filterwarnings("ignore")
import os,numpy,pandas,sys,scipy.io,scipy.sparse,time,numba
from optparse import OptionParser
#
#
usage = "Evaluate gene score by TSS peaks\nusage: %prog -s project --gtf hg19.gtf --distal 20000"
opts = OptionParser(usage=usage, version="%prog 1.0")
opts.add_option("-s", help="The project folder.")
opts.add_option("--gtf", default='../reference/hg19_RefSeq_genes.gtf',
help="gtf file for genome, default=../reference/hg19_RefSeq_genes.gtf")
opts.add_option("--distal", default=20000,
help="distal region around TSS for peak searching, default=20000")
options, arguments = opts.parse_args()
#
#
def get_tss_region(options):
mm10_df = pandas.read_csv(options.gtf, sep='\t', index_col=0)
genes = list(set(mm10_df['name2']))
genes.sort()
mm10_df.index = mm10_df['name']
names, tss = [], []
for symbol in genes:
sub_df = mm10_df.loc[mm10_df['name2']==symbol]
if len(sub_df.index.values)>=1:
chrom = list(set(sub_df['chrom'].values))
strand = list(set(sub_df['strand'].values))
if len(chrom)==1:
if strand[0]=='+':
starts = list(set(map(str, sub_df['txStart'].values)))
start = ','.join(starts)
elif strand[0]=='-':
starts = list(set(map(str, sub_df['txEnd'].values)))
start = ','.join(starts)
names.append(symbol)
tss.append([chrom[0], start])
tss = numpy.array(tss)
tss_df = pandas.DataFrame(tss, index=names, columns=['chrom', 'tss'])
tss_df.to_csv(options.s+'/peak/genes_tss_region.csv', sep='\t')
return
#
#
def get_tss_peaks(options):
peaks = [[x.split()[0], (int(x.split()[1])+int(x.split()[2]))/2]
for x in open(options.s+'/peak/top_filtered_peaks.bed').readlines()]
peaks_df = pandas.DataFrame(peaks, index=[str(x) for x in numpy.arange(0,len(peaks))],
columns=['chrom', 'center'])
tss_df = pandas.read_csv(options.s+'/peak/genes_tss_region.csv', sep='\t', index_col=0)
for gene in tss_df.index.values:
        chrom, tsses = tss_df.loc[gene, 'chrom'], tss_df.loc[gene, 'tss']
tsses = map(int, tsses.split(','))
chr_peaks = peaks_df.loc[peaks_df['chrom']==chrom]
proxim_peaks, distal_peaks = [], []
for tss in tsses:
peaks1 = chr_peaks.loc[abs(chr_peaks['center']-tss)<=2000].index.values
peaks2 = chr_peaks.loc[abs(chr_peaks['center']-tss)<=int(options.distal)].index.values
proxim_peaks.extend(peaks1)
distal_peaks.extend(peaks2)
proxim_peaks = list(set(proxim_peaks))
distal_peaks = list(set(distal_peaks)-set(proxim_peaks))
if len(proxim_peaks)==0: proxim_peaks = ['NONE']
if len(distal_peaks)==0: distal_peaks = ['NONE']
proxim_peaks = ';'.join(proxim_peaks)
        tss_df.loc[gene, 'proximal'] = proxim_peaks
        distal_peaks = ';'.join(distal_peaks)
        tss_df.loc[gene, 'distal'] = distal_peaks
tss_df.to_csv(options.s+'/peak/genes_tss_peaks.csv', sep='\t')
return
#
#
def get_score_from_peaks(options):
tss_df = | pandas.read_csv(options.s+'/peak/genes_tss_peaks.csv', sep='\t', index_col=0) | pandas.read_csv |
from bs4 import BeautifulSoup
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pypatent
import requests
from datetime import datetime  # used by search_serpapi for timestamping output files
from serpapi import GoogleSearch  # used by search_serpapi for Google Scholar queries
from c0104_findLatLong import findLatLong
def search_pubs():
"""
    Objective: List Rooster publications with metadata
    Task 1: Identify search terms for Google Scholar
Task 2: Query
Task 3: Structure data
"""
print("running search_pubs")
search_terms = []
search_terms.append('RoosterBio')
search_terms.append('Multivascular networks and functional intravascular topologies within biocompatible hydrogels')
search_terms.append('Mesenchymal Stem Cell Perspective: Cell Biology to Clinical Progress')
search_terms.append('Mesenchymal Stromal Cell Exosomes Ameliorate Experimental Bronchopulmonary Dysplasia and Restore Lung Function through Macrophage Immunomodulation')
search_terms.append('Bone marrow-derived from the human femoral shaft as a new source of mesenchymal stem/stromal cells: an alternative cell material for banking and clinical')
search_terms.append('Rooster Basal-MSC')
search_terms.append('Rooster Nourish')
search_terms.append('Rooster MSC')
# Step 2: Scrape
# SerpApi search can only be done once a month
# search_serpapi(search_terms)
# Step 3: Structure data
# Parse the html to list all publications
parse_html()
# Step 4: Find GPS find gps coordinates
unique_addresses()
#
def search_serpapi(search_terms):
"""
    Query SerpApi
    Save the results in a text file
"""
results = ''
for term in search_terms:
startNums = np.arange(0,50,2)
if len(term) > 30: startNums = np.arange(0,2,1)
for starts in startNums:
params = {
"api_key": "<KEY>",
"engine": "google_scholar",
"q": term,
"hl": "en",
"as_ylo": "2012",
"as_yhi": "2022",
"start": starts*10,
"num": starts*10+20,
}
search = GoogleSearch(params)
result = search.get_dict()
errorMessage = 'Your searches for the month are exhausted. You can upgrade plans on SerpApi.com website.'
if errorMessage in str(results): print('Error found:' + errorMessage)
errorMessage = 'Google hasn\'t returned any results for this query.'
if errorMessage in str(results): print('Error found:' + errorMessage)
if errorMessage not in str(results):
results = results + ' ' + str(result)
df = pd.DataFrame()
df['text'] = [results]
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
pub_path = os.path.join('searchResults')
if not os.path.isdir(pub_path): os.mkdir(pub_path)
pub_path = os.path.join('searchResults', 'pubs')
if not os.path.isdir(pub_path): os.mkdir(pub_path)
pub_path = os.path.join('searchResults', 'pubs', 'scrapeSerpapi')
if not os.path.isdir(pub_path): os.mkdir(pub_path)
pub_file = os.path.join(pub_path, 'pubsRetrieved' + '_' + str(time) + '.csv')
df.to_csv(pub_file)
print('pubs saved: ' + pub_file)
print("completed search_pubs")
def parse_html():
"""
"""
pubSource = []
pub_path = os.path.join('searchResults', 'pubs', 'saved')
arr = os.listdir(pub_path)
for path in arr: pubSource.append(os.path.join(pub_path, path))
pub_path = os.path.join('searchResults', 'pubs', 'scrapeSerpapi')
arr = os.listdir(pub_path)
for path in arr: pubSource.append(os.path.join(pub_path, path))
# print('pubSource = ')
# print(pubSource)
textFull = ''
for file in pubSource:
with open(file, newline='') as csvfile:
for row in csvfile:
textFull = textFull + ' ' + row
positionSplit = textFull.split('{\'position\':')
tags = []
tags.append('titlePaper|\'title\': \'|\', \'')
tags.append('citations|\'cited_by\': {\'total\': |, \'')
tags.append('url| \'html_version\': \'|\'')
tags.append('scholarUrl|, \'link\': \' |\', \'cites_id\'')
tags.append('snippet| \'snippet\': \'|\', \'pu')
tags.append('info| \'publication_info\': |, \'resources\'')
tags.append('article| | ')
df = pd.DataFrame()
for tag in tags:
list = []
for article in positionSplit:
tagTitle, tagBegin, tagEnd = tag.split('|')
articleSplit = article.split(tagBegin)
if len(articleSplit) > 1:
articleSecondSplit = articleSplit[1]
articleThirdSplit = articleSecondSplit.split(tagEnd)
target = articleThirdSplit[0]
else: target = ''
if tagTitle == 'article': target = str(article)
if tagTitle == 'citations':
if len(target) > 0: target = int(target)
else: target = 0
list.append(target)
df[tagTitle] = list
df = df.sort_values(by=['citations'], ascending=False)
df = df.drop_duplicates(subset=['titlePaper', 'citations'])
df = df.reset_index()
del df['index']
print(df)
pub_path = os.path.join('searchResults', 'pubs')
if not os.path.isdir(pub_path): os.mkdir(pub_path)
pub_file = os.path.join(pub_path, 'pubsRooster' + '.csv')
df.to_csv(pub_file)
print('pubs saved: ' + pub_file)
def unique_addresses():
"""
List unique addresses
with the frequency they occur
and latitude / longitude
"""
pub_path = os.path.join('searchResults', 'pubs', 'manuallyCollected')
pub_file = os.path.join(pub_path, 'manuallyCollectedPubData' + '.csv')
df = | pd.read_csv(pub_file) | pandas.read_csv |
# -*- coding: UTF-8 -*-
"""
This module contains functions for calculating evaluation metrics for the generated service recommendations.
"""
import numpy
import pandas
runtime_metrics = ["Training time", "Overall testing time", "Individual testing time"]
quality_metrics = ["Recall", "Precision", "F1", "# of recommendations"]
def results_as_dataframe(user_actions, recommendations):
"""
Converts the recommendation results into a pandas dataframe for easier evaluation.
@param user_actions: A list of the actually performed user actions.
@param recommendations: For each of the performed actions the list of calculated service recommendations.
@return: A pandas dataframe that has as index the performed user actions (there is one row per action). The first
column contains for each action the highest scoring recommendation, the second column contains the second best
recommendation etc.
"""
results = pandas.DataFrame(recommendations, index=pandas.Index(user_actions, name="Actual action"))
results.columns = [(r+1) for r in range(len(results.columns))]
return results
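# Illustrative example: results_as_dataframe(["tv_on", "door_open"],
#                                            [["radio_on", "tv_on"], ["door_open", "light_on"]])
# returns a 2x2 dataframe indexed by "Actual action" with columns 1 and 2, where column 1
# holds the highest scoring recommendation for each action and column 2 the runner-up.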
class QualityMetricsCalculator():
"""
This is a utility class that contains a number of methods for calculating overall quality metrics for the produced
recommendations. In general these methods produce pandas dataframes with several rows, where each row corresponds
to one "cutoff" point. For example, a cutoff "4" means that the system cuts the number of recommendations at four,
i.e. the user is shown at most four recommendations. If some post-processing method was used (e.g. show fewer
recommendations if the recommendation conflict is low), then it can happen that fewer than four recommendations
are shown. For reference, the column "# of recommendations" lists the average of the number of recommendations that
were actually shown to the user.
"""
def __init__(self, actual_actions, recommendations):
"""
        Initialize the calculation of the quality metrics.
@param actual_actions: A list of strings, each representing one actual user action.
@param recommendations: A list of lists of strings with the same length as actual_actions. Each list of
strings contains the calculated recommendations for the corresponding actual user action.
@return:
"""
self.results = results_as_dataframe(actual_actions, recommendations)
def __unique_actions__(self):
"""
It can happen that one potential user action never happened, but that the corresponding service was recommended.
To be able to count these false positives, we must calculate the list of all potential actions.
"""
occurring_actions = set(self.results.index.values)
occurring_services = pandas.melt(self.results).dropna()["value"]
occurring_services = set(occurring_services.unique())
return sorted(occurring_actions | occurring_services)
def true_positives(self, action):
"""
Counts how often the given action was recommended correctly (true positives, TP).
@param action: The name of the user action for which to count true positives.
@return: A pandas dataset with column TP and several rows, first row lists #TP at cutoff "1", the second row at
cutoff "2", etc.
"""
#get all rows where the actual action corresponds to the given action
r = self.results[self.results.index == action]
if len(r) == 0:
#if there are no such rows, then we have zero true positives, fill result dataframe with zeroes
true_positives = pandas.Series(0.0, index=self.results.columns)
else:
#if recommendation matches the action, set column to "1" (true positive), else set to "0" (false negative)
r = r.applymap(lambda col: 1 if col == action else 0).fillna(0)
#count how many true positives there are in each column
r = r.sum()
#if have a true positive for n-th recommendation, then also have true positive for n+1, n+2 etc
#-> calculate cumulative sum
true_positives = r.cumsum(axis=0).apply(float)
true_positives = pandas.DataFrame(true_positives, columns=["TP"])
true_positives.index.name = "cutoff"
return true_positives
def true_positives_for_all(self):
"""
Create a matrix that contains information about true positives for all possible actions.
@return: A pandas with one column for each action, first row lists #TP at cutoff "1", the second row at
cutoff "2", etc.
"""
tp = [self.true_positives(action) for action in self.__unique_actions__()]
tp = pandas.concat(tp, axis=1)
tp.columns = self.__unique_actions__()
return tp
def false_negatives(self, action):
"""
Counts how often the given action was not recommended correctly (false negatives, FN).
@param action: The name of the user action for which to count false negatives.
@return: A pandas dataset with column FN and several rows, first row lists #FN cutoff "1", the second row at
cutoff "2", etc.
"""
#the amount of false negatives corresponds to the difference between the total number of occurrences of the
#action and the number of false positives
true_positives = self.true_positives(action)
total_occurrences = len(self.results[self.results.index == action])
total_occurrences = pandas.Series(total_occurrences, index=true_positives.index)
false_negatives = total_occurrences - true_positives["TP"]
false_negatives = pandas.DataFrame(false_negatives, columns=["FN"])
false_negatives.index.name = "cutoff"
return false_negatives
def false_positives(self, action):
"""
Counts how often the given action was recommended even though it didn't occur (false positives, FP).
@param action: The name of the user action for which to count false positives.
@return: A pandas dataset with column FP and several rows, first row lists #FP at cutoff "1", the second row at
cutoff "2", etc.
"""
#get all rows where the actual service does NOT correspond to the given action
r = self.results[self.results.index != action]
if len(r) == 0:
#if there are no such rows, then we have zero false positives, fill result dataframe with zeroes
false_positives = pandas.Series(0.0, index=self.results.columns)
else:
#if recommendation matches the action, set column to "1" (false positive), else set to "0" (true negative)
r = r.applymap(lambda col: 1 if col == action else 0)
#count how many false positives there are in each column
r = r.sum()
#if have a false positive for n-th recommendation, then also have false positive for n+1, n+2 etc
#-> calculate cumulative sum
false_positives = r.cumsum(axis=0).apply(float)
false_positives = pandas.DataFrame(false_positives, columns=["FP"])
false_positives.index.name = "cutoff"
return false_positives
@staticmethod
def precision(counts):
"""
Calculate the precision as (true positives)/(true positives + false positives).
@param counts: A dataframe that contains a column "TP" with true positives and "FP" with false positives.
@return: A pandas dataframe with one column "Precision". The first row lists the achieved precision at cutoff
"1", the second row at cutoff "2", etc.
"""
p = counts["TP"]/(counts["TP"] + counts["FP"])
p = pandas.DataFrame({"Precision": p}).fillna(0.0)
return p
@staticmethod
def recall(counts):
"""
Calculate the recall as (true positives)/(true positives + false negatives).
@param counts: A dataframe that contains a column "TP" with true positives and "FN" with false negatives.
@return: A pandas dataframe with one column "Recall". The first row lists the achieved recall at cutoff "1",
the second row at cutoff "2", etc.
"""
p = counts["TP"]/(counts["TP"] + counts["FN"])
p = pandas.DataFrame({"Recall": p}).fillna(0.0)
return p
@staticmethod
def f1(metrics):
"""
Calculate the F1 as the harmonic mean of precision and recall.
@param metrics: A dataframe with a column "Precision" and a column "Recall"
@return: A pandas dataframe with one column "F1". The first row lists the achieved F1 at cutoff "1", the second
row at cutoff "2", etc.
"""
f = (2.0*metrics["Precision"]*metrics["Recall"]) / (metrics["Precision"]+metrics["Recall"])
f = pandas.DataFrame({"F1": f}).fillna(0.0)
return f
def number_of_recommendations(self):
"""
        Count how many recommendations the user was actually shown (e.g. when using a dynamic cutoff such as "show
        fewer recommendations when recommendation conflict is low"). The number of recommendations is not a quality
        metric but fits here conceptually.
@return: A pandas dataframe with one column "# of recommendations". The first row lists the # at cutoff "1", the
second row at cutoff "2", etc.
"""
n = (self.results.count(axis=0)/float(len(self.results))).cumsum()
n = pandas.DataFrame({"# of recommendations": n})
n.index.name = "cutoff"
return n
def calculate_for_action(self, action):
"""
Calculate precision, recall and F1 for one action (= one possible user action)
@param action: Which user action to calculate the metrics for.
@return: A pandas dataframe containing columns for "Precision", "Recall", "F1". The first row lists
calculated metrics at cutoff "1", the second row at cutoff "2", etc. A fourth column "action" simply lists the
action name in all rows, this column is necessary for later merging the metrics of all actions.
"""
#count how many true positives, false positives and false negatives occurred for this action
counts = pandas.concat([self.true_positives(action),
self.false_negatives(action),
self.false_positives(action)],
axis=1)
#use these counts to calculate the relevant metrics
metrics = pandas.concat([self.precision(counts),
self.recall(counts)],
axis=1)
metrics["F1"] = self.f1(metrics)["F1"]
#add column that contains name of the action in all rows, to prepare for merging the metrics for all actions
metrics["action"] = pandas.Series(action, index=metrics.index)
return metrics
def calculate(self):
"""
Performs the actual calculation of the weighted average of precision, recall and F1 over all actions and counts
the number of recommendations that where actually shown to the user.
@return: A pandas dataframe containing one column for each of the four quality metrics. The first row lists
calculated metrics at cutoff "1", the second row at cutoff "2"
"""
#make one big matrix with the metrics for all actions
actions = self.__unique_actions__()
metrics = pandas.concat([self.calculate_for_action(action) for action in actions])
#count for each action how often the corresponding action actually occurred
        occurrences = pandas.Series(self.results.index.values).value_counts()
occurrences = occurrences.reindex(actions).fillna(0)
#calculate the weighted average for each of the metrics (i.e. actions that occur more often have a higher
#influence on the overall results for "Precision", "Recall and "F1")
actions_as_index = lambda group: group.set_index("action").reindex(actions).fillna(0.0)
weighted_average_for_column = lambda col: numpy.average(col.values, weights=occurrences.values)
weighted_average = lambda group: actions_as_index(group).apply(weighted_average_for_column)
metrics = metrics.groupby(level="cutoff").aggregate(weighted_average)
del(metrics["action"]) #get rid of now unnecessary "action" column
#do not need weighted average for # of recommendations, simply add counts as fourth column
metrics["# of recommendations"] = self.number_of_recommendations()
return metrics
def confusion_matrix(self):
"""
Calculate a confusion matrix: for each action count how often each service was recommended
@return: A pandas dataframe, with one row for each possible action and one row for each possible
service recommendation. Each matrix item counts how often the service was recommended when the action happened.
"""
cutoff = 1 #only makes sense for cutoff=1
def confusions_for_action(action):
r = self.results[self.results.index == action][cutoff]
return r.groupby(r).count()
actions = self.__unique_actions__()
matrix = [confusions_for_action(action) for action in actions]
matrix = | pandas.concat(matrix, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import os
import glob
import pandas as pd
import numpy as np
from collections import Counter
from graphpype.utils_net import read_Pajek_corres_nodes
from graphpype.utils_dtype_coord import where_in_coords
from graphpype.utils_cor import where_in_labels
from graphpype.utils_mod import read_lol_file
from graphpype.utils_mod import get_modularity_value_from_lol_file
from graphpype.utils_mod import get_values_from_global_info_file
from graphpype.utils_mod import get_path_length_from_info_dists_file
def glob_natural_sorted(reg_exp):
# TODO -> utils.py
"""sort reg_exp filenames in natural way (for numbers)"""
print(reg_exp)
files = glob.glob(reg_exp)
print(len(files))
natural_sorted_files = [reg_exp.replace(
'*', str(i), -1) for i in range(len(files))]
return natural_sorted_files, list(range(len(files)))
def compute_rada_df(iter_path, df, radatools_version="3.2", mapflow=[],
mapflow_name=""):
"""gather rada """
if radatools_version == "3.2":
net_prop_dir = "net_prop"
elif radatools_version == "4.0":
net_prop_dir = "prep_rada"
elif radatools_version == "5.0":
net_prop_dir = "net_prop"
elif radatools_version == "run":
net_prop_dir = ""
else:
print("Warning, could not find radatools_version {}"
.format(radatools_version))
return
# modularity
if len(mapflow) == 0:
modularity_file = os.path.join(
iter_path, "community_rada", "Z_List.lol")
print(modularity_file)
if os.path.exists(modularity_file):
mod_val = get_modularity_value_from_lol_file(modularity_file)
df['Modularity'] = mod_val
print(df)
# info_global
global_info_file = os.path.join(
iter_path, net_prop_dir, "Z_List-info_global.txt")
if os.path.exists(global_info_file):
global_info_values = get_values_from_global_info_file(
global_info_file)
df.update(global_info_values)
# info_dists
path_length_file = os.path.join(
iter_path, net_prop_dir, "Z_List-info_dists.txt")
if os.path.exists(path_length_file):
mean_path_length, diameter, global_efficiency = \
get_path_length_from_info_dists_file(path_length_file)
df['Mean_path_length'] = str(mean_path_length)
df['Diameter'] = str(diameter)
df['Global_efficiency'] = str(global_efficiency)
else:
print("Could not find file {}".format(path_length_file))
else:
df['Modularity'] = []
df[mapflow_name] = []
df['Mean_path_length'] = []
df['Diameter'] = []
df['Global_efficiency'] = []
for i, cond in enumerate(mapflow):
df[mapflow_name].append(cond)
modularity_file = os.path.join(
iter_path, "community_rada", "mapflow",
"_community_rada"+str(i), "Z_List.lol")
if os.path.exists(modularity_file):
mod_val = get_modularity_value_from_lol_file(modularity_file)
df['Modularity'].append(mod_val)
else:
print("Missing modularity file {}".format(modularity_file))
df['Modularity'].append(np.nan)
# info_global
global_info_file = os.path.join(
iter_path, net_prop_dir, "mapflow", "_" + net_prop_dir+str(i),
"Z_List-info_global.txt")
if os.path.exists(global_info_file):
global_info_values = get_values_from_global_info_file(
global_info_file)
for key, value in global_info_values.items():
if key not in list(df.keys()):
df[key] = []
df[key].append(value)
# info_dists
path_length_file = os.path.join(
iter_path, net_prop_dir, "mapflow", "_" + net_prop_dir+str(i),
"Z_List-info_dists.txt")
if os.path.exists(path_length_file):
mean_path_length, diameter, global_efficiency = \
get_path_length_from_info_dists_file(path_length_file)
df['Mean_path_length'].append(str(mean_path_length))
df['Diameter'].append(str(diameter))
df['Global_efficiency'].append(str(global_efficiency))
else:
df['Mean_path_length'].append(str(np.nan))
df['Diameter'].append(str(np.nan))
df['Global_efficiency'].append(str(np.nan))
def compute_nodes_rada_df(
local_dir, gm_coords=[], coords_file="", gm_labels=[], labels_file="",
radatools_version="3.2", mapflow=[], mapflow_name=""):
"""node properties df"""
if radatools_version == "3.2":
net_prop_dir = "net_prop"
elif radatools_version == "4.0":
net_prop_dir = "prep_rada"
elif radatools_version == "5.0":
net_prop_dir = "net_prop"
elif radatools_version == "run":
net_prop_dir = ""
else:
print("Warning, could not find radatools_version {}"
.format(radatools_version))
return
list_df = []
if len(mapflow) == 0:
Pajek_file = os.path.join(local_dir, net_prop_dir, "Z_List.net")
if os.path.exists(Pajek_file):
columns = []
columns_names = []
# nodes in the connected graph
node_corres = read_Pajek_corres_nodes(Pajek_file)
print(os.path.exists(coords_file))
if os.path.exists(coords_file) and len(gm_coords):
# MNI coordinates
coords = np.array(np.loadtxt(coords_file), dtype=int)
# node_coords
node_coords = coords[node_corres, :]
# where_in_gm_mask
where_in_gm_mask = where_in_coords(node_coords, gm_coords)
where_in_gm_mask = where_in_gm_mask.reshape(
where_in_gm_mask.shape[0], 1)
columns.append(where_in_gm_mask)
columns_names.append('Where_in_GM_mask')
if os.path.exists(labels_file):
labels = np.array(
[line.strip() for line in open(labels_file)],
dtype=str)
node_labels = labels[node_corres].reshape(-1, 1)
                    columns.append(node_labels)
                    columns_names.append('labels')
                    columns.append(node_coords)
                    columns_names.extend(['MNI_x', 'MNI_y', 'MNI_z'])
elif os.path.exists(labels_file) and len(gm_labels):
# TODO
labels = np.array([line.strip() for line in open(labels_file)],
dtype=str)
node_labels = labels[node_corres].reshape(-1, 1)
where_in_gm_mask = where_in_labels(node_labels, labels)
columns.append(where_in_gm_mask)
columns_names.append('Where_in_GM_mask')
columns.append(node_labels)
columns_names.append('labels')
                raise NotImplementedError("labels_file + gm_labels branch not implemented yet (see TODO above)")
elif len(gm_labels):
node_labels = np.array(gm_labels)[node_corres].reshape(-1, 1)
where_in_gm_mask = where_in_labels(node_labels,
gm_labels).reshape(-1, 1)
print(node_labels)
print(where_in_gm_mask)
columns.append(where_in_gm_mask)
columns_names.append('Where_in_GM_mask')
columns.append(node_labels)
columns_names.append('labels')
else:
print("No labels, no coords")
columns.append(node_corres)
columns_names.append('node_corres')
print(columns)
print(columns_names)
list_df.append(pd.DataFrame(
np.concatenate(tuple(columns), axis=1),
columns=columns_names))
else:
print("Missing {}".format(Pajek_file))
info_nodes_file = os.path.join(
local_dir, net_prop_dir, "Z_List-info_nodes.txt")
print(info_nodes_file)
if os.path.exists(info_nodes_file):
# loading info_nodes
df_node_info = pd.read_table(info_nodes_file)
list_df.append(df_node_info)
# modules /community_vect
partition_file = os.path.join(local_dir, "community_rada",
"Z_List.lol")
if os.path.exists(partition_file):
# loading partition_file
community_vect = read_lol_file(partition_file)
list_df.append(pd.DataFrame(community_vect, columns=['Module']))
# node roles
roles_file = os.path.join(local_dir, "node_roles", "node_roles.txt")
part_coeff_file = os.path.join(
local_dir, "node_roles", "all_participation_coeff.txt")
Z_com_degree_file = os.path.join(
local_dir, "node_roles", "all_Z_com_degree.txt")
if os.path.exists(roles_file) and os.path.exists(part_coeff_file) and \
os.path.exists(Z_com_degree_file):
# loding node roles
node_roles = np.array(np.loadtxt(roles_file), dtype=int)
part_coeff = np.loadtxt(part_coeff_file)
part_coeff = part_coeff.reshape(part_coeff.shape[0], 1)
Z_com_degree = np.loadtxt(Z_com_degree_file)
Z_com_degree = Z_com_degree.reshape(Z_com_degree.shape[0], 1)
list_df.append(pd.DataFrame(
np.concatenate((node_roles, part_coeff, Z_com_degree), axis=1),
columns=['Role_quality', 'Role_quantity',
'Participation_coefficient', 'Z_community_degree']))
# ndi values
ndi_values_file = os.path.join(
local_dir, "node_roles", "ndi_values.txt")
if os.path.exists(ndi_values_file):
# loding node roles
ndi_values = np.array(np.loadtxt(ndi_values_file))
list_df.append(pd.DataFrame(ndi_values,
columns=['Node_Dissociation_Index']))
else:
# Multiple files (mapflow)
for i, cond in enumerate(mapflow):
list_strip_df = []
Pajek_file = os.path.join(local_dir, "prep_rada", "mapflow",
"_prep_rada"+str(i), "Z_List.net")
if os.path.exists(coords_file) and os.path.exists(Pajek_file) and \
os.path.exists(labels_file):
# labels
labels = np.array([line.strip() for line in open(labels_file)],
dtype=str)
# MNI coordinates
coords = np.array(np.loadtxt(coords_file), dtype=int)
# nodes in the connected graph
node_corres = read_Pajek_corres_nodes(Pajek_file)
# node_coords
node_coords = coords[node_corres, :]
node_labels = labels[node_corres].reshape(-1, 1)
# where_in_gm_mask
where_in_gm_mask = where_in_coords(node_coords, gm_coords)
where_in_gm_mask = where_in_gm_mask.reshape(
where_in_gm_mask.shape[0], 1)
# print where_in_gm_mask
print(where_in_gm_mask.shape)
list_strip_df.append(pd.DataFrame(
np.concatenate((where_in_gm_mask, node_labels,
node_coords),
axis=1),
columns=['Where_in_GM_mask', 'labels', 'MNI_x', 'MNI_y',
'MNI_z']))
else:
if not os.path.exists(coords_file):
print("Missing {}".format(coords_file))
if not os.path.exists(Pajek_file):
print("Missing {}".format(Pajek_file))
if not os.path.exists(labels_file):
print("Missing {}".format(labels_file))
info_nodes_file = os.path.join(
local_dir, net_prop_dir, "Z_List-info_nodes.txt")
print(info_nodes_file)
if os.path.exists(info_nodes_file):
# loading info_nodes
df_node_info = pd.read_table(info_nodes_file)
list_strip_df.append(df_node_info)
# modules /community_vect
partition_file = os.path.join(
local_dir, "community_rada", "mapflow",
"_community_rada"+str(i), "Z_List.lol")
if os.path.exists(partition_file):
# loading partition_file
community_vect = read_lol_file(partition_file)
list_strip_df.append(pd.DataFrame(community_vect,
columns=['Module']))
# node roles
roles_file = os.path.join(local_dir, "node_roles", "mapflow",
"_node_roles"+str(i), "node_roles.txt")
part_coeff_file = os.path.join(
local_dir, "node_roles", "mapflow", "_node_roles"+str(i),
"all_participation_coeff.txt")
Z_com_degree_file = os.path.join(
local_dir, "node_roles", "mapflow", "_node_roles"+str(i),
"all_Z_com_degree.txt")
if os.path.exists(roles_file) and os.path.exists(part_coeff_file) \
and os.path.exists(Z_com_degree_file):
node_roles = np.array(np.loadtxt(roles_file), dtype=int)
part_coeff = np.loadtxt(part_coeff_file)
part_coeff = part_coeff.reshape(part_coeff.shape[0], 1)
Z_com_degree = np.loadtxt(Z_com_degree_file)
Z_com_degree = Z_com_degree.reshape(Z_com_degree.shape[0], 1)
list_strip_df.append(pd.DataFrame(
np.concatenate((node_roles, part_coeff, Z_com_degree),
axis=1),
columns=['Role_quality', 'Role_quantity',
'Participation_coefficient',
'Z_community_degree']))
# ndi values
ndi_values_file = os.path.join(
local_dir, "node_roles", "mapflow", "_node_roles"+str(i),
"ndi_values.txt")
if os.path.exists(ndi_values_file):
ndi_values = np.array(np.loadtxt(ndi_values_file))
list_strip_df.append(pd.DataFrame(
ndi_values, columns=['Node_Dissociation_Index']))
# Converting list_strip_df to df, and adding it to list_df
if len(list_strip_df):
nb_nodes = len(list_strip_df[0].index)
list_strip_df.append(pd.DataFrame([i]*nb_nodes,
columns=[mapflow_name]))
strip_df = pd.concat(list_strip_df, axis=1)
list_df.append(strip_df)
return list_df
def compute_signif_permuts(permut_df, permut_col="Seed",
session_col="Session", start_col=0, stop_col=0,
columns=[]):
"""
    Computing permutation-based stats per node, over several sheetnames.
    Computes the significance of the permutations over a df generated by gather_permuts.
    args:
        permut_df: original permutation results (pandas DataFrame)
        stop_col: last column to be included
            (in fact excluded, since Python slicing is used; if the value is 0, all columns
            from start_col to the last column of the df are used)
    return:
        all_p_higher, all_p_lower: vectors of p-values obtained for a one-tailed permutation
            test in each direction, first session - second session
"""
seed_index = np.unique(permut_df[permut_col].values)
print(seed_index)
# should start with -1
if seed_index[0] != -1:
print("Error, permut_col {} should start with -1".format(permut_col))
return pd.DataFrame()
expected_permut_indexes = list(range(len(seed_index)-1))
nb_permuts = len(expected_permut_indexes)
print(nb_permuts)
# selecting columns
if len(columns) != 0:
data_cols = columns
else:
if stop_col == 0:
data_cols = permut_df.columns[start_col:]
else:
data_cols = permut_df.columns[start_col:stop_col]
print(data_cols)
# looping over selected columns
sum_higher = np.zeros(shape=(len(data_cols)), dtype='float64') - 1
sum_lower = np.zeros(shape=(len(data_cols)), dtype='float64') - 1
all_p_higher = np.zeros(shape=(len(data_cols)), dtype='float64') - 1
all_p_lower = np.zeros(shape=(len(data_cols)), dtype='float64') - 1
count_case = np.zeros(shape=(len(data_cols)), dtype='float64')
cols = []
if session_col == -1 or len(permut_df[session_col].unique()) == 1:
print("Compairing one session with itself")
for index_col, col in enumerate(data_cols):
sum_higher[index_col] = np.sum(
(permut_df[col].iloc[1:] >= permut_df[col].iloc[0])
.values.astype(int))
all_p_higher[index_col] = (
sum_higher[index_col]+1)/float(permut_df[col].shape[0])
sum_lower[index_col] = np.sum(
(permut_df[col].iloc[1:] <= permut_df[col].iloc[0])
.values.astype(int))
all_p_lower[index_col] = (sum_lower[index_col]+1) / \
float(permut_df[col].shape[0])
count_case[index_col] = permut_df[col].shape[0]
cols.append(str(col))
else:
# should start at 0 and have all values in between
if not all(x in seed_index[1:] for x in expected_permut_indexes):
print("Error, permut indexes should be consecutive and start with \
#0: {} ".format(expected_permut_indexes))
return pd.DataFrame()
print("Compairing diffences between two sessions")
# all unique values should have 2 different samples
count_elements = Counter(permut_df[permut_col].values)
# -1 should be represented Two times:
if not count_elements[-1] == 2:
print("-1 should be represented Two times")
return pd.DataFrame()
if not all(val == 2 for val in list(count_elements.values())):
print("Warning, all permut indexes should have 2 lines: {}"
.format(count_elements))
# computing diff df
for index_col, col in enumerate(data_cols):
df_col = permut_df.pivot(
index=permut_col, columns=session_col, values=col)
df_col["Diff"] = pd.to_numeric(
df_col.iloc[:, 0]) - pd.to_numeric(df_col.iloc[:, 1])
diff_col = df_col["Diff"].dropna().reset_index(drop=True)
if diff_col.shape[0] == 0:
sum_higher[index_col] = np.nan
sum_lower[index_col] = np.nan
all_p_higher[index_col] = np.nan
all_p_lower[index_col] = np.nan
cols.append(col)
continue
if diff_col[0] > 0:
sum_higher[index_col] = np.sum(
np.array(diff_col[1:] >= diff_col[0], dtype=int))
print(col, "sum_higher:", sum_higher[index_col])
all_p_higher[index_col] = \
(sum_higher[index_col]+1)/float(diff_col.shape[0])
elif diff_col[0] < 0:
sum_lower[index_col] = np.sum(
np.array(diff_col[1:] <= diff_col[0], dtype=int))
print(col, "sum_lower:", sum_lower[index_col])
all_p_lower[index_col] = \
(sum_lower[index_col]+1)/float(diff_col.shape[0])
else:
print("not able to do diff")
count_case[index_col] = diff_col.shape[0]
cols.append(col)
df_res = pd.DataFrame([sum_higher, sum_lower, all_p_higher,
all_p_lower, count_case],
columns=cols)
df_res.index = ["Sum Higher", "Sum Lower", "Pval Higher",
"Pval Lower", "Count"]
return df_res
def compute_signif_node_prop(orig_df, list_permut_df, columns):
"""signif node properties"""
permut_df = pd.concat(list_permut_df, axis=0)
all_frac_higher = []
for col in columns:
assert col in orig_df.columns, \
"Error, {} not in orig columns {}".format(col, orig_df.columns)
assert col in permut_df.columns, \
"Error, {} not in permut columns {}".format(col, permut_df.columns)
def sum_higher(a, b):
def func(el):
return np.sum(el[0] < b.values)
return np.apply_along_axis(func, 1, a[:, None])
frac_higher = np.array(
sum_higher(orig_df[col], permut_df[col])+1,
dtype=float)/float(len(permut_df.index) + 1)
all_frac_higher.append(frac_higher)
df_signif = pd.DataFrame(np.transpose(
np.array(all_frac_higher)), columns=columns)
return df_signif
def gather_diff_con_values(res_path, cond, nb_permuts, labels):
"""gather con values"""
if isinstance(cond, tuple):
# si plusieurs conditions = IRMf
df_filename = os.path.join(
res_path, "permuts_" + ".".join(cond) + '_con_values.csv')
else:
# si une seule valeur
df_filename = os.path.join(
res_path, "permuts_" + cond + '_con_values.csv')
if not os.path.exists(df_filename):
# pair of labels and tri triu_indices
triu_indices_i, triu_indices_j = np.triu_indices(len(labels), k=1)
pair_labels = [labels[i] + "_" + labels[j]
for i, j in zip(triu_indices_i.tolist(),
triu_indices_j.tolist())]
print(pair_labels)
print(len(pair_labels))
# creating dataframe
all_vect_cormats = []
all_global_info_values = []
for seed in range(-1, nb_permuts):
print(seed)
for sess in ['1', '2']:
print(sess)
dict_global_info_values = {'Session': sess, 'Seed': seed}
all_global_info_values.append(dict_global_info_values)
# avg_cormat
if isinstance(cond, tuple):
iter_dir = "_cond_" + \
".".join(cond) + "_permut_" + str(seed)
else:
iter_dir = "_freq_band_name_" + \
cond + "_permut_" + str(seed)
avg_cormat_file = os.path.join(
res_path, iter_dir,
"prepare_mean_correl" + sess, "avg_cormat.npy")
print(avg_cormat_file)
if os.path.exists(avg_cormat_file):
avg_cormat = np.load(avg_cormat_file)
vect_avg_cormat = avg_cormat[triu_indices_i,
triu_indices_j]
all_vect_cormats.append(vect_avg_cormat)
df_info = pd.DataFrame(all_global_info_values)
df_con = pd.DataFrame(all_vect_cormats, columns=pair_labels)
df = pd.concat((df_info, df_con), axis=1)
        df.to_csv(df_filename)  # index is written so it can be read back below with index_col=0
else:
df = pd.read_csv(df_filename, index_col=0)
return df
def gather_con_values(res_path, cond, nb_permuts, labels):
import os
if isinstance(cond, tuple):
# si plusieurs conditions = IRMf
df_filename = os.path.join(
res_path, "permuts_" + ".".join(cond) + '_con_values.csv')
else:
# si une seule valeur
df_filename = os.path.join(
res_path, "permuts_" + cond + '_con_values.csv')
if not os.path.exists(df_filename):
# pair of labels and tri triu_indices
triu_indices_i, triu_indices_j = np.triu_indices(len(labels), k=1)
pair_labels = [labels[i] + "_" + labels[j]
for i, j in zip(triu_indices_i.tolist(),
triu_indices_j.tolist())]
# creating dataframe
all_vect_cormats = []
all_global_info_values = []
for seed in range(-1, nb_permuts):
dict_global_info_values = {'Seed': seed}
all_global_info_values.append(dict_global_info_values)
# avg_cormat
if isinstance(cond, tuple):
iter_dir = "_cond_" + ".".join(cond) + "_permut_" + str(seed)
else:
iter_dir = "_freq_band_name_" + cond + "_permut_" + str(seed)
avg_cormat_file = os.path.join(
res_path, iter_dir, "shuffle_matrix", "shuffled_matrix.npy")
if os.path.exists(avg_cormat_file):
avg_cormat = np.load(avg_cormat_file)
avg_cormat = avg_cormat + np.transpose(avg_cormat)
vect_avg_cormat = avg_cormat[triu_indices_i, triu_indices_j]
all_vect_cormats.append(vect_avg_cormat)
else:
print("Warning, could not find file \
{}".format(avg_cormat_file))
df_info = | pd.DataFrame(all_global_info_values) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = 4.0
result = df.compare(df2, align_axis=align_axis)
if align_axis in (1, "columns"):
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, 4.0]],
index=indices,
columns=columns,
)
else:
indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
columns = pd.Index(["col1", "col3"])
expected = pd.DataFrame(
[["a", np.nan], ["c", np.nan], [np.nan, 3.0], [np.nan, 4.0]],
index=indices,
columns=columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"keep_shape, keep_equal",
[
(True, False),
(False, True),
(True, True),
# False, False case is already covered in test_compare_axis
],
)
def test_compare_various_formats(keep_shape, keep_equal):
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = 4.0
result = df.compare(df2, keep_shape=keep_shape, keep_equal=keep_equal)
if keep_shape:
indices = pd.Index([0, 1, 2])
columns = pd.MultiIndex.from_product(
[["col1", "col2", "col3"], ["self", "other"]]
)
if keep_equal:
expected = pd.DataFrame(
[
["a", "c", 1.0, 1.0, 1.0, 1.0],
["b", "b", 2.0, 2.0, 2.0, 2.0],
["c", "c", np.nan, np.nan, 3.0, 4.0],
],
index=indices,
columns=columns,
)
else:
expected = pd.DataFrame(
[
["a", "c", np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 3.0, 4.0],
],
index=indices,
columns=columns,
)
else:
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", 1.0, 1.0], ["c", "c", 3.0, 4.0]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
def test_compare_with_equal_nulls():
# We want to make sure two NaNs are considered the same
# and dropped where applicable
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
result = df.compare(df2)
indices = pd.Index([0])
columns = pd.MultiIndex.from_product([["col1"], ["self", "other"]])
expected = pd.DataFrame([["a", "c"]], index=indices, columns=columns)
tm.assert_frame_equal(result, expected)
def test_compare_with_non_equal_nulls():
# We want to make sure the relevant NaNs do not get dropped
# even if the entire row or column are NaNs
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = np.nan
result = df.compare(df2)
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, np.nan]],
index=indices,
columns=columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("align_axis", [0, 1])
def test_compare_multi_index(align_axis):
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}
)
df.columns = pd.MultiIndex.from_arrays([["a", "a", "b"], ["col1", "col2", "col3"]])
df.index = pd.MultiIndex.from_arrays([["x", "x", "y"], [0, 1, 2]])
df2 = df.copy()
df2.iloc[0, 0] = "c"
df2.iloc[2, 2] = 4.0
result = df.compare(df2, align_axis=align_axis)
if align_axis == 0:
indices = pd.MultiIndex.from_arrays(
[["x", "x", "y", "y"], [0, 0, 2, 2], ["self", "other", "self", "other"]]
)
columns = pd.MultiIndex.from_arrays([["a", "b"], ["col1", "col3"]])
data = [["a", np.nan], ["c", np.nan], [np.nan, 3.0], [np.nan, 4.0]]
else:
indices = | pd.MultiIndex.from_arrays([["x", "y"], [0, 2]]) | pandas.MultiIndex.from_arrays |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas.compat import long
from pandas.core.arrays import PeriodArray, DatetimeArrayMixin as DatetimeArray
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
def one(request):
# zero-dim integer array behaves like an integer
return request.param
zeros = [box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array]
for dtype in [np.int64, np.uint64, np.float64]]
zeros.extend([np.array(0, dtype=dtype)
for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([0, 0.0, long(0)])
@pytest.fixture(params=zeros)
def zero(request):
# For testing division by (or of) zero for Index with length 5, this
# gives several scalar-zeros and length-5 vector-zeros
return request.param
# ------------------------------------------------------------------
# Vector Fixtures
@pytest.fixture(params=[pd.Float64Index(np.arange(5, dtype='float64')),
pd.Int64Index(np.arange(5, dtype='int64')),
pd.UInt64Index(np.arange(5, dtype='uint64')),
pd.RangeIndex(5)],
ids=lambda x: type(x).__name__)
def numeric_idx(request):
"""
Several types of numeric-dtypes Index objects
"""
return request.param
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return pd.Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Scalar Fixtures
@pytest.fixture(params=[pd.Timedelta('5m4s').to_pytimedelta(),
| pd.Timedelta('5m4s') | pandas.Timedelta |
#!/usr/bin/env python
# coding: utf-8
# # US Beveridge Curve Data
#
# Construct monthly unemploment rate and vacancy rate series for the US from April 1929 through the most recently available date. The methodology is based on the approach described in Petrosky-Nadeau and Zhang (2013): https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2241695
#
# 1. This Notebook is compatible with Python 2 and 3.
#
# 2. **To use this notebook to download the entire dataset, you need the X-13ARIMA-SEATS binary**. If you don't have the binary, set variable `x_13` to `False`. Data that require seasonal adjustment will be loaded from the `txt` directory of the parent directory to this program.
#
# Binaries for Windows and Linux/Unix are available from https://www.census.gov/srd/www/x13as/. To compile X-13 for Mac OS X, see the instructions here: https://github.com/christophsax/seasonal/wiki/Compiling-X-13ARIMA-SEATS-from-Source-for-OS-X.
# In[1]:
import statsmodels as sm
import fredpy as fp
import matplotlib.pyplot as plt
plt.style.use('classic')
import numpy as np
import pandas as pd
import os,urllib
import warnings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
# You must change XPATH if you are running this script from anywhere other than the directory containing x13as.
XPATH = os.getcwd()
# Load fredpy api key
fp.api_key = fp.load_api_key('fred_api_key.txt')
# Whether x13 binary is available
x_13 = False
# ## Unemployment Rate
#
# Construct an unemployment series from April 1929 through the most recent date available by concatenating four U.S. unemployment rate series; all of which are available from FRED (https://fred.stlouisfed.org/). Specifically:
#
# 1. Seasonally adjusted unemployment rate for the United States from April 1929 through February 1940. FRED series ID: M0892AUSM156SNBR. NBER Indicator: m08292a.
# 2. Seasonally adjusted unemployment rate for the United States from March 1940 through December 1946. FRED series ID: M0892BUSM156SNBR. NBER Indicator: m08292b.
# 3. Seasonally adjusted unemployment rate for the United States from January 1947 through December 1947. FRED series ID: M0892CUSM156NNBR. NBER Indicator: m08292c. Note: The source data are not seasonally adjusted and contain observations through December 1966. Seasonally adjust the entire series through December 1966 using the U.S. Census Bureau's X-13-ARIMA seasonal adjustment program. Then discard values after December 1947. *Only downloaded if `x_13 == True.`*
# 4. Seasonally adjusted unemployment rate for the United States from January 1948 through the most recent date available. FRED series ID: UNRATE.
# In[2]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1929-04-01 to 1940-02-01;
# Seasonally adjusted
# Download from FRED and save as a Pandas series
unemp_1 = fp.series('M0892AUSM156SNBR')
unemp_1 = unemp_1.window(['04-01-1929','02-01-1940']).data
# In[3]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1940-03-01 to 1946-12-01;
# Seasonally adjusted
# Download from FRED and save as a Pandas series
unemp_2 = fp.series('M0892BUSM156SNBR')
unemp_2 = unemp_2.window(['03-01-1940','12-01-1946']).data
# In[4]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1947-01-01 to 1966-12-01;
# Raw series is *not* seasonally adjusted
if x_13:
# Download from FRED
unemp_3 = fp.series('M0892CUSM156NNBR')
unemp_3 = unemp_3.window(['01-01-1947','12-01-1966']).data
# Run x13_arima_analysis to obtain SA unemployment data.
x13results = sm.tsa.x13.x13_arima_analysis(endog = unemp_3,x12path=XPATH, outlier=False,print_stdout=True)
unemp_3 = pd.Series(x13results.seasadj.values,index=unemp_3.index)
unemp_3 = unemp_3[(unemp_3.index>=pd.to_datetime('01-01-1947')) & (unemp_3.index<=pd.to_datetime('12-01-1947'))]
# Export the series to txt
unemp_3.to_csv('../txt/unemployment_1947.txt',sep='\t')
else:
# Import data
unemp_3 = pd.read_csv('../txt/unemployment_1947.txt',sep='\t',index_col=0,parse_dates=True)['0']
# In[5]:
# US civilian unemployment rate from the BLS: 1948-01-01 to most recent;
# Seasonally adjusted
unemp_4 = fp.series('UNRATE')
unemp_4 = unemp_4.window(['01-01-1948','01-01-2200']).data
# In[6]:
# Concatenate the first three series
unemployment_rate_series = unemp_1.append(unemp_2).sort_index()
unemployment_rate_series = unemployment_rate_series.append(unemp_3).sort_index()
unemployment_rate_series = unemployment_rate_series.append(unemp_4).sort_index()
# plot the series and save the figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(unemployment_rate_series,'-',lw=4,alpha = 0.65)
ax.set_ylabel('Percent')
ax.grid()
fig.tight_layout()
plt.savefig('../png/fig_data_unrate.png',bbox_inches='tight',dpi=120)
#
# ## Vacancies (Job openings)
#
# Construct a series of vacancies for the United States going back to April 1929 by scaling and concatenating three series:
# 1. Help-wanted advertising in newspapers index for United States from April 1929 to January 1960. FRED series ID: M0882AUSM349NNBR. NBER Indicator: m08082a. Note: The source data are not seasonally adjusted and contain observations through August 1960. Seasonally adjust the entire series through August 1960 using the United States Census Bureau's X-13-ARIMA seasonal adjustment program. Then discard values after January 1960. *Only downloaded if `x_13 == True.`*
# 2. Composite help-wanted index from January 1960 through January 2001 constructed using the method described in Barnichon (2010). The data are from Barnichon's website https://sites.google.com/site/regisbarnichon/data. Scale this series so that its value in January 1960 equals the value of the NBER's help-wanted index for the same date.
# 3. Job openings, total nonfarm for the United States from January 2001 to the most recent date available. FRED series ID: JTSJOL. Scale this series so that its value in January 2001 equals the value of the scaled help-wanted index from Barnichon for the same date.
# In[7]:
if x_13:
# Met life help-wanted index: 1919-01-01 to 1960-08-01;
# Not seasonally adjusted
vac_1 = fp.series('M0882AUSM349NNBR').data
# temp_series = pd.Series(vac_1.data,index=pd.to_datetime(vac_1.dates))
# Run x13_arima_analysis to obtain SA vacancy rate data.
x13results = sm.tsa.x13.x13_arima_analysis(endog = vac_1,x12path=XPATH, outlier=False,print_stdout=True)
vac_1 = pd.Series(x13results.seasadj.values,index=vac_1.index)
vac_1 = vac_1[(vac_1.index>=pd.to_datetime('04-01-1929')) ]
# Export the series to txt
vac_1.to_csv('../txt/vacancies_1929-1960.txt',sep='\t')
else:
vac_1 = pd.read_csv('../txt/vacancies_1929-1960.txt',sep='\t',index_col=0,parse_dates=True)['0']
# In[8]:
# Composite help-wanted index from Regis Barnichon's site: https://sites.google.com/site/regisbarnichon;
# Seasonally adjusted
# Import data from Regis Barnichon's site
dls = 'https://sites.google.com/site/regisbarnichon/cv/HWI_index.txt?attredirects=0'
try:
urllib.urlretrieve(dls, '../txt/HWI_index.txt')
except:
try:
urllib.request.urlretrieve(dls, '../txt/HWI_index.txt')
except:
print('HWI_index.txt is no longer available at given URL')
vac_2 = pd.read_csv('../txt/HWI_index.txt',delimiter='\t',skiprows=6)
vac_2.columns = ['Date','composite HWI']
# Manage dates
dates = []
for d in vac_2['Date']:
dates.append(d[-2:]+'-01-'+d[0:4])
vac_2 = pd.Series(vac_2['composite HWI'].values,index = pd.to_datetime(dates))
# Compute a scaling factor to ensure that the January 1, 1960 values of the first vacancy series match
# the second.
scaling = vac_1.loc['01-01-1960']/vac_2.loc['1960-01-01']
vac_2 = scaling* vac_2
# In[9]:
# Job Openings and Labor Turnover Survey (JOLTS) : December 1, 2000 to present
# Seasonally adjusted
vac_3 = fp.series('JTSJOL').data
# Compute a scaling factor to ensure that the December 1, 2000 values of the first vacancy series match
# the second.
scaling = vac_2.loc['12-01-2000']/vac_3.loc['12-01-2000']
vac_3 = scaling* vac_3
# In[10]:
# Truncate each series
vac_1 = vac_1.loc[:'12-01-1959']
vac_2 = vac_2.loc['01-01-1960':'12-01-2000']
vac_3 = vac_3.loc['01-01-2001':]
# Plot the three truncated and scaled series to verify that they line up
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(vac_1,'-',lw=3,alpha = 0.65)
ax.plot(vac_2,'-',lw=3,alpha = 0.65)
ax.plot(vac_3,'-',lw=3,alpha = 0.65)
ax.set_title('Vacancies (unscaled)')
ax.grid()
# In[11]:
# Create the vacancy series
vacancy_series_unscaled = vac_1.append(vac_2).sort_index()
vacancy_series_unscaled = vacancy_series_unscaled.append(vac_3).sort_index()
# plot the series and save the figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot_date(vacancy_series_unscaled.index,vacancy_series_unscaled.values,'-',lw=3,alpha = 0.65)
ax.set_title('Vacancies (unscaled)')
ax.grid()
fig.tight_layout()
plt.savefig('../png/fig_data_vacancies.png',bbox_inches='tight',dpi=120)
# ## Labor force data
#
# Next, construct monthly labor force data for the United States from April 1929 by concatenating two series:
# 1. Civilian labor force for the United States from January 1948 to the most recent date available. FRED series ID: CLF16OV.
# 2. Historical national population estimates from Population Estimates Program, Population Division, U.S. Census Bureau. The source data are annual from July 1, 1900 to July 1, 1999 and not seasonally adjusted. Extend the data to monthly frequency by linear interpolation and discard observations before April 1929 and after January 1948. Then scale this series so that its value in January 1948 equals the value of the civilian labor force series for the same date.
# In[12]:
# Civilian labor force over 16 years of age in thousands of persons: January 1948 to present;
# Seasonally adjusted
lf_1 = fp.series('CLF16OV')
lf_1 = lf_1.window(['01-01-1800','06-01-2216']).data
# In[13]:
# Historical National Population Estimates: July 1, 1900 to July 1, 1999
# Source: Population Estimates Program, Population Division, U.S. Census Bureau
# Annual, Not seasonally adjusted
# Retrieve data from Census
dls = 'http://www.census.gov/popest/data/national/totals/pre-1980/tables/popclockest.txt'
dls = 'https://www.census.gov/population/estimates/nation/popclockest.txt'
dls = 'https://www2.census.gov/programs-surveys/popest/tables/1900-1980/national/totals/popclockest.txt'
try:
urllib.urlretrieve(dls, '../txt/popclockest.txt')
except:
try:
urllib.request.urlretrieve(dls, '../txt/popclockest.txt')
except:
print('popclockest.txt is no longer available at given URL')
# Import data and edit file
with open('../txt/popclockest.txt','r') as newfile:
lines = newfile.readlines()
# Remove leading and trailing whitespace and overwrite spaces in with tabs in lines
newlines = []
for i,line in enumerate(lines):
newline = line.rstrip().lstrip()
newline = newline.replace(' ','\t')
newline = newline.replace(' ','\t')
newline = newline.replace(' ','\t')
newline = newline+'\n'
newlines.append(newline)
# Collect the population and date information
pop = []
dates=[]
for i,line in enumerate(newlines[9:]):
if len(line.split('\t'))==4:
line_split = line.split('\t')
dates.append(line_split[0])
pop.append(float(line_split[1].replace(',','')))
# Form the series
lf_2 = pd.Series(pop,index = pd.to_datetime(dates))
# Resample data as monthly and interpolate
lf_2 = lf_2.sort_index()
lf_2 = lf_2.resample('M').mean().interpolate()
# Set dates to beginning of month instead of middle
lf_2.index = lf_2.index + pd.offsets.MonthBegin(0)
# Compute a scaling factor to ensure that the January 1, 1948 values of the first LF series match
# the second.
scaling = lf_1.iloc[0]/lf_2[lf_2.index==pd.to_datetime('1948-01-01')].values[0]
lf_2 = scaling*lf_2[(lf_2.index>=pd.to_datetime('1929-04-01')) & (lf_2.index<pd.to_datetime('1948-01-01'))]
# In[14]:
# Plot the two truncated and scaled series to verify that they line up
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(lf_2,'-',lw=3,alpha = 0.65)
ax.plot(lf_1,'-',lw=3,alpha = 0.65)
ax.set_title('Labor force')
ax.grid()
fig.tight_layout()
# In[15]:
# form the labor force series
labor_force_series = lf_1.append(lf_2).sort_index()
# plot the series and save the figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(labor_force_series/1000,'-',lw=4,alpha = 0.65)
ax.set_title('Labor force')
ax.set_ylabel('Millions of persons')
ax.grid()
fig.tight_layout()
plt.savefig('../png/fig_data_labor_force.png',bbox_inches='tight',dpi=120)
# ## Vacancy rate
#
# Now with a vacancy series and a labor force series, compute the monthly vacancy rate for the United States by dividing the vacancy series by the labor force series. Following Petrosky-Nadeau and Zhang (2013), scale the result so that the average vacancy rate for 1965 is 2.05\% in order to match the vacancy rate estimate for 1965 obtained by Zagorsky (1998).
# In[16]:
# Construct the vacancy_rate series
vacancy_rate_series = vacancy_series_unscaled / labor_force_series
# Compute a scaling factor to ensure that the average vacancy rate for 1965 is 0.0205
scaling = vacancy_rate_series[(vacancy_rate_series.index>=pd.to_datetime('1965-01-01')) & (vacancy_rate_series.index<=pd.to_datetime('1965-12-01'))].mean()/0.0205
vacancy_rate_series = 100*vacancy_rate_series/scaling
vacancy_series = vacancy_rate_series*labor_force_series/100
unemployment_series = unemployment_rate_series*labor_force_series/100
market_tightness_series = vacancy_series/unemployment_series
# In[17]:
# plot the series and save the figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(vacancy_rate_series,'-',lw=4,alpha = 0.65)
ax.set_ylabel('Vacancy rate')
ax.grid()
fig.tight_layout()
plt.savefig('../png/fig_data_vacancy_rate.png',bbox_inches='tight',dpi=120)
# ## Organize data
#
# In the rest of the program, organize the data into DataFrames, construct plots that are used in the paper, and export datasets that can be used to replicate the figures and to investigate the data more carefully.
# In[18]:
# Organize data into DataFrames
df_rates = | pd.concat([unemployment_rate_series,vacancy_rate_series,market_tightness_series], join='outer', axis = 1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on 2017-7-7
@author: cheng.li
"""
import abc
import sys
import pandas as pd
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy import or_
from sqlalchemy import select
from alphamind.data.dbmodel.models import Universe as UniverseTable
class BaseUniverse(metaclass=abc.ABCMeta):
@abc.abstractmethod
def condition(self):
pass
def __add__(self, rhs):
return OrUniverse(self, rhs)
def __sub__(self, rhs):
return XorUniverse(self, rhs)
def __and__(self, rhs):
return AndUniverse(self, rhs)
def __or__(self, rhs):
return OrUniverse(self, rhs)
def isin(self, rhs):
return AndUniverse(self, rhs)
@abc.abstractmethod
def save(self):
pass
@classmethod
def load(cls, u_desc: dict):
pass
def query(self, engine, start_date: str = None, end_date: str = None, dates=None):
if hasattr(UniverseTable, "flag"):
more_conditions = [UniverseTable.flag == 1]
else:
more_conditions = []
query = select([UniverseTable.trade_date, UniverseTable.code.label("code")]).where(
and_(
self._query_statements(start_date, end_date, dates),
*more_conditions
)
).order_by(UniverseTable.trade_date, UniverseTable.code)
df = pd.read_sql(query, engine.engine)
df["trade_date"] = | pd.to_datetime(df["trade_date"]) | pandas.to_datetime |
"""
Detection Recipe - 192.168.3.11
References:
(1) 'Asteroseismic detection predictions: TESS' by Chaplin (2015)
(2) 'On the use of empirical bolometric corrections for stars' by Torres (2010)
(3) 'The amplitude of solar oscillations using stellar techniques' by Kjeldson (2008)
(4) 'An absolutely calibrated Teff scale from the infrared flux method'
by Casagrande (2010) table 4
(5) 'Characterization of the power excess of solar-like oscillations in red giants with Kepler'
by Mosser (2011)
(6) 'Predicting the detectability of oscillations in solar-type stars observed by Kepler'
by Chaplin (2011)
(7) 'The connection between stellar granulation and oscillation as seen by the Kepler mission'
by Kallinger et al (2014)
(8) 'The Transiting Exoplanet Survey Satellite: Simulations of Planet Detections and
Astrophysical False Positives' by Sullivan et al. (2015)
(9) Astropysics module at https://pythonhosted.org/Astropysics/coremods/coords.html
(10) <NAME>'s calc_noise IDL procedure for TESS.
(11) <NAME>lin's soldet6 IDL procedure to calculate the probability of detecting
oscillations with Kepler.
(12) Coordinate conversion at https://ned.ipac.caltech.edu/forms/calculator.html
(13) Bedding 1996
(14) 'The Asteroseismic potential of TESS' by Campante et al. 2016
"""
import numpy as np
from itertools import groupby
from operator import itemgetter
import sys
import pandas as pd
from scipy import stats
import warnings
warnings.simplefilter("ignore")
def bv2teff(b_v):
# from Torres 2010 table 2. Applies to MS, SGB and giant stars
# B-V limits from Flower 1996 fig 5
a = 3.979145106714099
b = -0.654992268598245
c = 1.740690042385095
d = -4.608815154057166
e = 6.792599779944473
f = -5.396909891322525
g = 2.192970376522490
h = -0.359495739295671
lteff = a + b*b_v + c*(b_v**2) + d*(b_v**3) + e*(b_v**4) + f*(b_v**5) + g*(b_v**6) + h*(b_v**7)
teff = 10.0**lteff
return teff
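# A minimal sanity-check sketch (not part of the original recipe; the colour
# value below is an illustrative assumption): a roughly solar B-V of ~0.65
# should map to a temperature in the neighbourhood of 5700-5800 K.
# print(bv2teff(0.65))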
# from <NAME> 2003. BCv values from Flower 1996 polynomials presented in Torres 2010
# Av is a keyword argument. If reddening values are not available, ignore its effect
def Teff2bc2lum(teff, parallax, parallax_err, vmag, Av=0):
lteff = np.log10(teff)
BCv = np.full(len(lteff), -100.5)
BCv[lteff<3.70] = (-0.190537291496456*10.0**5) + \
(0.155144866764412*10.0**5*lteff[lteff<3.70]) + \
(-0.421278819301717*10.0**4.0*lteff[lteff<3.70]**2.0) + \
(0.381476328422343*10.0**3*lteff[lteff<3.70]**3.0)
BCv[(3.70<lteff) & (lteff<3.90)] = (-0.370510203809015*10.0**5) + \
(0.385672629965804*10.0**5*lteff[(3.70<lteff) & (lteff<3.90)]) + \
(-0.150651486316025*10.0**5*lteff[(3.70<lteff) & (lteff<3.90)]**2.0) + \
(0.261724637119416*10.0**4*lteff[(3.70<lteff) & (lteff<3.90)]**3.0) + \
(-0.170623810323864*10.0**3*lteff[(3.70<lteff) & (lteff<3.90)]**4.0)
BCv[lteff>3.90] = (-0.118115450538963*10.0**6) + \
(0.137145973583929*10.0**6*lteff[lteff > 3.90]) + \
(-0.636233812100225*10.0**5*lteff[lteff > 3.90]**2.0) + \
(0.147412923562646*10.0**5*lteff[lteff > 3.90]**3.0) + \
(-0.170587278406872*10.0**4*lteff[lteff > 3.90]**4.0) + \
(0.788731721804990*10.0**2*lteff[lteff > 3.90]**5.0)
u = 4.0 + 0.4 * 4.73 - 2.0 * np.log10(parallax) - 0.4 * (vmag - Av + BCv)
lum = 10**u # in solar units
e_lum = (2.0 / parallax * 10**u)**2 * parallax_err**2
e_lum = np.sqrt(e_lum)
return lum, e_lum
# calculate seismic parameters
def seismicParameters(teff, lum):
# solar parameters
teff_solar = 5777.0 # Kelvin
teffred_solar = 8907.0 #in Kelvin
numax_solar = 3090.0 # in micro Hz
dnu_solar = 135.1 # in micro Hz
cadence = 120 # in s
vnyq = (1.0 / (2.0*cadence)) * 10**6 # in micro Hz
teffred = teffred_solar*(lum**-0.093) # from (6) eqn 8. red-edge temp
rad = lum**0.5 * ((teff/teff_solar)**-2) # Steffan-Boltzmann law
numax = numax_solar*(rad**-1.85)*((teff/teff_solar)**0.92) # from (14)
return cadence, vnyq, rad, numax, teffred, teff_solar, teffred_solar, numax_solar, dnu_solar
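# A hedged usage sketch (the stellar parameters below are illustrative
# assumptions, not catalogue values): for a star with solar Teff and L ~ 1 Lsun,
# rad comes out as 1 and numax reduces to the solar value of ~3090 microHz.
# cadence, vnyq, rad, numax, teffred, teff_solar, teffred_solar, \
#     numax_solar, dnu_solar = seismicParameters(teff=5777.0, lum=1.0)
# print(rad, numax)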
# no coordinate conversion before calculating tess field observing time. Only
# works with ecliptic coordinates
def tess_field_only(e_lng, e_lat):
# create a list to append all of the total observing times 'T' in the TESS field to
T = [] # units of sectors (0-13)
# create a list to append all of the maximum contiguous observations to
max_T = [] # units of sectors (0-13)
for star in range(len(e_lng)):
# 'n' defines the distance between each equidistant viewing sector in the TESS field.
n = 360.0/13
# Define a variable to count the total number of sectors a star is observed in.
counter = 0
# Define a variable to count all of the observations for each star.
# Put each observation sector into sca separately in order to find the largest number
# of contiguous observations for each star.
sca = []
# 'ranges' stores all of the contiguous observations for each star.
ranges = []
# Defines the longitude range of the observing sectors at the inputted stellar latitude
lngrange = 24.0/abs(np.cos(np.radians(e_lat[star])))
if lngrange>=360.0:
lngrange=360.0
# if the star is in the northern hemisphere:
if e_lat[star] >= 0.0:
# For each viewing sector.
for i in range(1,14):
# Define an ra position for the centre of each sector in increasing longitude.
# if a hemisphere has an overshoot, replace 0.0 with the value.
a = 0.0+(n*(i-1))
# calculate the distances both ways around the
# circle between the star and the centre of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
d1 = abs(e_lng[star]-a)
d2 = (360.0 - abs(e_lng[star]-a))
if d1>d2:
d1 = d2
# if the star is in the 'overshoot' region for some sectors, calculate d3 and d4;
# the distances both ways around the circle bwtween the star and the centre of the
# 'overshooting past the pole' region of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
# the shortest distances between the centre of the sector and star, and the sector's
# overshoot and the star should add to 180.0 apart (i.e d1+d3=180.0)
d3 = abs(e_lng[star] - (a+180.0)%360.0)
d4 = 360.0 - abs(e_lng[star] - (a+180.0)%360.0)
if d3>d4:
d3 = d4
# check if a star lies in the field of that sector.
if (d1<=lngrange/2.0 and 6.0<=e_lat[star]) or (d3<=lngrange/2.0 and 78.0<=e_lat[star]):
counter += 1
sca = np.append(sca, i)
else:
pass
# if the star is in the southern hemisphere:
if e_lat[star] < 0.0:
# For each viewing sector.
for i in range(1,14):
# Define an ra position for the centre of each sector in increasing longitude.
# if a hemisphere has an overshoot, replace 0.0 with the value.
a = 0.0+(n*(i-1))
# calculate the distances both ways around the
# circle between the star and the centre of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
d1 = abs(e_lng[star]-a)
d2 = (360 - abs(e_lng[star]-a))
if d1>d2:
d1 = d2
# if the star is in the 'overshoot' region for some sectors, calculate d3 and d4;
# the distances both ways around the circle between the star and the centre of the
# 'overshooting past the pole' region of the sector.
# The smallest distance of the 2 is the one that should be used
# to see if the star lies in the observing sector.
d3 = abs(e_lng[star] - (a+180.0)%360.0)
d4 = (360 - abs(e_lng[star] - (a+180.0)%360.0))
if d3>d4:
d3 = d4
# check if a star lies in the field of that sector.
if (d1<=lngrange/2.0 and -6.0>=e_lat[star]) or (d3<=lngrange/2.0 and -78.0>=e_lat[star]):
counter += 1
sca = np.append(sca, i)
else:
pass
if len(sca) == 0:
ranges = [0]
else:
            for k,g in groupby(enumerate(sca), lambda i_x:i_x[0]-i_x[1]):
                # materialise the group so it can be counted after summing (Python 3 safe)
                group = list(map(itemgetter(1), g))
                if np.array(group).sum() !=0:
                    ranges.append([len(group)])
T=np.append(T, counter)
max_T = np.append(max_T, np.max(np.array(ranges)))
return T, max_T
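# A minimal usage sketch (the ecliptic coordinates below are made up for
# illustration): T and max_T are returned in units of 27.4-day sectors, so a
# star near an ecliptic pole should collect many more sectors than one near
# the ecliptic plane.
# T, max_T = tess_field_only(np.array([30.0, 200.0]), np.array([85.0, 10.0]))
# print(T, max_T)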
def calc_noise(imag, exptime, teff, e_lng = 0, e_lat = 30, g_lng = 96, g_lat = -30, subexptime = 2.0, npix_aper = 10, \
frac_aper = 0.76, e_pix_ro = 10, geom_area = 60.0, pix_scale = 21.1, sys_limit = 0):
omega_pix = pix_scale**2.0
n_exposures = exptime/subexptime
# electrons from the star
megaph_s_cm2_0mag = 1.6301336 + 0.14733937*(teff-5000.0)/5000.0
e_star = 10.0**(-0.4*imag) * 10.0**6 * megaph_s_cm2_0mag * geom_area * exptime * frac_aper
e_star_sub = e_star*subexptime/exptime
# e/pix from zodi
dlat = (abs(e_lat)-90.0)/90.0
vmag_zodi = 23.345 - (1.148*dlat**2.0)
e_pix_zodi = 10.0**(-0.4*(vmag_zodi-22.8)) * (2.39*10.0**-3) * geom_area * omega_pix * exptime
# e/pix from background stars
dlat = abs(g_lat)/40.0*10.0**0
dlon = g_lng
q = np.where(dlon>180.0)
if len(q[0])>0:
dlon[q] = 360.0-dlon[q]
dlon = abs(dlon)/180.0*10.0**0
p = [18.97338*10.0**0, 8.833*10.0**0, 4.007*10.0**0, 0.805*10.0**0]
imag_bgstars = p[0] + p[1]*dlat + p[2]*dlon**(p[3])
e_pix_bgstars = 10.0**(-0.4*imag_bgstars) * 1.7*10.0**6 * geom_area * omega_pix * exptime
# compute noise sources
noise_star = np.sqrt(e_star) / e_star
noise_sky = np.sqrt(npix_aper*(e_pix_zodi + e_pix_bgstars)) / e_star
noise_ro = np.sqrt(npix_aper*n_exposures)*e_pix_ro / e_star
noise_sys = 0.0*noise_star + sys_limit/(1*10.0**6)/np.sqrt(exptime/3600.0)
noise1 = np.sqrt(noise_star**2.0 + noise_sky**2.0 + noise_ro**2.0)
noise2 = np.sqrt(noise_star**2.0 + noise_sky**2.0 + noise_ro**2.0 + noise_sys**2.0)
return noise2
# calculate the granulation at a set of frequencies from (7) eqn 2 model F
def granulation(nu0, dilution, a_nomass, b1, b2, vnyq):
# Divide by dilution squared as it affects stars in the time series.
# The units of dilution change from ppm to ppm^2 microHz^-1 when going from the
# time series to frequency. p6: c=4 and zeta = 2*sqrt(2)/pi
Pgran = (((2*np.sqrt(2))/np.pi) * (a_nomass**2/b1) / (1 + ((nu0/b1)**4)) \
+ ((2*np.sqrt(2))/np.pi) * (a_nomass**2/b2) / (1 + ((nu0/b2)**4))) / (dilution**2)
# From (9). the amplitude suppression factor. Normalised sinc with pi (area=1)
eta = np.sinc((nu0/(2*vnyq)))
# the granulation after attenuation
Pgran = Pgran * eta**2
return Pgran, eta
# the total number of pixels used by the highest ranked x number of targets in the tCTL
def pixel_cost(x):
N = np.ceil(10.0**-5.0 * 10.0**(0.4*(20.0-x)))
N_tot = 10*(N+10)
total = np.cumsum(N_tot)
# want to find: the number of ranked tCTL stars (from highest to lowest rank) that correspond to a pixel cost of 1.4Mpix at a given time
per_cam = 26*4 # to get from the total pixel cost to the cost per camera at a given time, divide by this
pix_limit = 1.4e6 # the pixel limit per camera at a given time
return total[-1], per_cam, pix_limit, N_tot
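# A hedged sketch of how these outputs might be combined to answer the question
# in the comment inside pixel_cost above (imag_sorted is a hypothetical array of
# magnitudes sorted from highest to lowest rank): the number of top-ranked
# targets that fit on one camera is where the cumulative pixel cost per camera
# first crosses pix_limit.
# total, per_cam, pix_limit, npix_aper = pixel_cost(imag_sorted)
# n_affordable = np.searchsorted(np.cumsum(npix_aper) / per_cam, pix_limit)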
# detection recipe to find whether a star has an observed solar-like Gaussian mode power excess
def globalDetections(g_lng, g_lat, e_lng, e_lat, imag, \
lum, rad, teff, numax, max_T, teffred, teff_solar, \
teffred_solar, numax_solar, dnu_solar, sys_limit, dilution, vnyq, cadence, vary_beta=False):
dnu = dnu_solar*(rad**-1.42)*((teff/teff_solar)**0.71) # from (14) eqn 21
beta = 1.0-np.exp(-(teffred-teff)/1550.0) # beta correction for hot solar-like stars from (6) eqn 9.
if isinstance(teff, float): # for only 1 star
if (teff>=teffred):
beta = 0.0
else:
beta[teff>=teffred] = 0.0
# to remove the beta correction, set Beta=1
if vary_beta == False:
beta = 1.0
# modified from (6) eqn 11. Now consistent with dnu proportional to numax^0.77 in (14)
amp = 0.85*2.5*beta*(rad**1.85)*((teff/teff_solar)**0.57)
# From (5) table 2 values for delta nu_{env}. env_width is defined as +/- some value.
env_width = 0.66 * numax**0.88
env_width[numax>100.] = numax[numax>100.]/2. # from (6) p12
total, per_cam, pix_limit, npix_aper = pixel_cost(imag)
noise = calc_noise(imag=imag, teff=teff, exptime=cadence, e_lng=e_lng, e_lat=e_lat, \
g_lng=g_lng, g_lat=g_lat, sys_limit=sys_limit, npix_aper=npix_aper)
noise = noise*10.0**6 # total noise in units of ppm
a_nomass = 0.85 * 3382*numax**-0.609 # multiply by 0.85 to convert to redder TESS bandpass.
b1 = 0.317 * numax**0.970
b2 = 0.948 * numax**0.992
# call the function for the real and aliased components (above and below vnyq) of the granulation
    # the order of the stars is different for the aliases so run the function in a loop
Pgran, eta = granulation(numax, dilution, a_nomass, b1, b2, vnyq)
Pgranalias = np.zeros(len(Pgran))
etaalias = np.zeros(len(eta))
# if vnyq is 1 fixed value
if isinstance(vnyq, float):
for i in range(len(numax)):
if numax[i] > vnyq:
Pgranalias[i], etaalias[i] = granulation((vnyq - (numax[i] - vnyq)), \
dilution, a_nomass[i], b1[i], b2[i], vnyq)
elif numax[i] < vnyq:
Pgranalias[i], etaalias[i] = granulation((vnyq + (vnyq - numax[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq)
# if vnyq varies for each star
else:
for i in range(len(numax)):
if numax[i] > vnyq[i]:
Pgranalias[i], etaalias[i] = granulation((vnyq[i] - (numax[i] - vnyq[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq[i])
elif numax[i] < vnyq[i]:
Pgranalias[i], etaalias[i] = granulation((vnyq[i] + (vnyq[i] - numax[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq[i])
Pgrantotal = Pgran + Pgranalias
ptot = (0.5*2.94*amp**2.*((2.*env_width)/dnu)*eta**2.) / (dilution**2.)
Binstr = 2.0 * (noise)**2. * cadence*10**-6.0 # from (6) eqn 18
bgtot = ((Binstr + Pgrantotal) * 2.*env_width) # units are ppm**2
snr = ptot/bgtot # global signal to noise ratio from (11)
fap = 0.05 # false alarm probability
pdet = 1.0 - fap
pfinal = np.full(rad.shape[0], -99)
idx = np.where(max_T != 0) # calculate the indexes where T is not 0
tlen=max_T[idx]*27.4*86400.0 # the length of the TESS observations in seconds
bw=1.0 * (10.0**6.0)/tlen
nbins=(2.*env_width[idx]/bw).astype(int) # from (11)
snrthresh = stats.chi2.ppf(pdet, 2.0*nbins) / (2.0*nbins) - 1.0
pfinal[idx] = stats.chi2.sf((snrthresh+1.0) / (snr[idx]+1.0)*2.0*nbins, 2.*nbins)
return pfinal, snr, dnu # snr is needed in TESS_telecon2.py
def BV2VI(bv, vmag, g_mag_abs):
whole = pd.DataFrame(data={'B-V': bv, 'Vmag': vmag, 'g_mag_abs': g_mag_abs, 'Ai': 0})
# Mg: empirical relation from Tiago to separate dwarfs from giants
# note: this relation is observational; it was made with REDDENED B-V and g_mag values
whole['Mg'] = 6.5*whole['B-V'] - 1.8
# B-V-to-teff limits from (6) fig 5
whole = whole[(whole['B-V'] > -0.4) & (whole['B-V'] < 1.7)]
print(whole.shape, 'after B-V cuts')
# B-V limits for dwarfs and giants, B-V conditions from (1)
# if a star can't be classified as dwarf or giant, remove it
condG = (whole['B-V'] > -0.25) & (whole['B-V'] < 1.75) & (whole['Mg'] > whole['g_mag_abs'])
condD1 = (whole['B-V'] > -0.23) & (whole['B-V'] < 1.4) & (whole['Mg'] < whole['g_mag_abs'])
condD2 = (whole['B-V'] > 1.4) & (whole['B-V'] < 1.9) & (whole['Mg'] < whole['g_mag_abs'])
whole = pd.concat([whole[condG], whole[condD1], whole[condD2]], axis=0)
print(whole.shape, 'after giant/dwarf cuts')
whole['V-I'] = 100. # write over these values for dwarfs and giants separately
# coefficients for giants and dwarfs
cg = [-0.8879586e-2, 0.7390707, 0.3271480, 0.1140169e1, -0.1908637, -0.7898824,
0.5190744, 0.5358868]
cd1 = [0.8906590e-1, 0.1319675e1, 0.4461807, -0.1188127e1, 0.2465572, 0.8478627e1,
0.1046599e2, 0.3641226e1]
cd2 = [-0.5421588e2, 0.8011383e3, -0.4895392e4, 0.1628078e5, -0.3229692e5,
0.3939183e5, -0.2901167e5, 0.1185134e5, -0.2063725e4]
# calculate (V-I) for giants
x = whole['B-V'][condG] - 1
y = (cg[0] + cg[1]*x + cg[2]*(x**2) + cg[3]*(x**3) + cg[4]*(x**4) +\
cg[5]*(x**5) + cg[6]*(x**6) + cg[7]*(x**7))
whole['V-I'][condG] = y + 1
x, y = [[] for i in range(2)]
# calculate (V-I) for dwarfs (1st B-V range)
x = whole['B-V'][condD1] - 1
y = (cd1[0] + cd1[1]*x + cd1[2]*(x**2) + cd1[3]*(x**3) + cd1[4]*(x**4) +\
cd1[5]*(x**5) + cd1[6]*(x**6) + cd1[7]*(x**7))
whole['V-I'][condD1] = y + 1
x, y = [[] for i in range(2)]
# calculate (V-I) for dwarfs (2nd B-V range)
x = whole['B-V'][condD2] - 1
y = (cd2[0] + cd2[1]*x + cd2[2]*(x**2) + cd2[3]*(x**3) + cd2[4]*(x**4) +\
cd2[5]*(x**5) + cd2[6]*(x**6) + cd2[7]*(x**7) + cd2[8]*(x**8))
whole['V-I'][condD2] = y + 1
x, y = [[] for i in range(2)]
# calculate Imag from V-I and reredden it
whole['Imag'] = whole['Vmag']-whole['V-I']
whole['Imag_reddened'] = whole['Imag'] + whole['Ai']
"""
# make Teff, luminosity, Plx and ELat cuts to the data
whole = whole[(whole['teff'] < 7700) & (whole['teff'] > 4300) & \
(whole['Lum'] > 0.3) & (whole['lum_D'] < 50) & ((whole['e_Plx']/whole['Plx']) < 0.5) \
& (whole['Plx'] > 0.) & ((whole['ELat']<=-6.) | (whole['ELat']>=6.))]
print(whole.shape, 'after Teff/L/Plx/ELat cuts')
"""
whole.drop(['Ai', 'Imag_reddened', 'Mg'], axis=1, inplace=True)
return whole.as_matrix().T
# make cuts to the data
def cuts(teff, e_teff, metal, e_metal, g_lng, g_lat, e_lng, e_lat, Tmag, e_Tmag, Vmag, e_Vmag, plx, e_plx, lum, star_name):
d = {'teff':teff, 'e_teff':e_teff, 'metal':metal, 'e_metal':e_metal, 'g_lng':g_lng, 'g_lat':g_lat, 'e_lng':e_lng, 'e_lat':e_lat,
'Tmag':Tmag, 'e_Tmag':e_Tmag, 'Vmag':Vmag, 'e_Vmag':e_Vmag, 'plx':plx, 'e_plx':e_plx, 'lum':lum, 'star_name':star_name}
whole = pd.DataFrame(d, columns = ['teff', 'e_teff', 'metal', 'e_metal', 'g_lng', 'g_lat', 'e_lng', 'e_lat',
'Tmag', 'e_Tmag', 'Vmag', 'e_Vmag', 'plx', 'e_plx', 'lum', 'star_name'])
whole = whole[(whole['teff'] < 7700.) & (whole['teff'] > 4300.) & (whole['e_teff'] > 0.) & \
(whole['lum'] > 0.3) & (whole['lum'] < 50.) & ((whole['e_plx']/whole['plx']) < 0.5) & \
(whole['plx'] > 0.) & ((whole['e_lat']<=-6.) | (whole['e_lat']>=6.)) & \
(whole['Tmag'] > 3.5) & (whole['e_metal'] > 0.)]
print(whole.shape, 'after cuts to the data')
return whole.as_matrix().T
if __name__ == '__main__':
df = pd.read_csv('files/MAST_Crossmatch_TIC4.csv', header=0,
index_col=False)
data = df.values
# star_name = data[:, 1]
teff = pd.to_numeric(data[:, 88])
# e_teff = pd.to_numeric(data[:, 89])
# metal = pd.to_numeric(data[:, 92])
# e_metal = pd.to_numeric(data[:, 93])
# g_lng = pd.to_numeric(data[:, 48])
# g_lat = pd.to_numeric(data[:, 49])
# e_lng = pd.to_numeric(data[:, 50])
# e_lat = pd.to_numeric(data[:, 51])
# Tmag = pd.to_numeric(data[:, 84])
# e_Tmag = pd.to_numeric(data[:, 85])
Vmag = pd.to_numeric(data[:, 54])
# e_Vmag = pd.to_numeric(data[:, 55])
plx = pd.to_numeric(data[:, 45])
e_plx = | pd.to_numeric(data[:, 46]) | pandas.to_numeric |
from datetime import datetime, date
import sys
if sys.version_info >= (2, 7):
from nose.tools import assert_dict_equal
import xlwings as xw
try:
import numpy as np
from numpy.testing import assert_array_equal
def nparray_equal(a, b):
try:
assert_array_equal(a, b)
except AssertionError:
return False
return True
except ImportError:
np = None
try:
import pandas as pd
from pandas import DataFrame, Series
from pandas.util.testing import assert_frame_equal, assert_series_equal
def frame_equal(a, b):
try:
assert_frame_equal(a, b)
except AssertionError:
return False
return True
def series_equal(a, b):
try:
assert_series_equal(a, b)
except AssertionError:
return False
return True
except ImportError:
pd = None
def dict_equal(a, b):
try:
assert_dict_equal(a, b)
except AssertionError:
return False
return True
# Defaults
@xw.func
def read_float(x):
return x == 2.
@xw.func
def write_float():
return 2.
@xw.func
def read_string(x):
return x == 'xlwings'
@xw.func
def write_string():
return 'xlwings'
@xw.func
def read_empty(x):
return x is None
@xw.func
def read_date(x):
return x == datetime(2015, 1, 15)
@xw.func
def write_date():
return datetime(1969, 12, 31)
@xw.func
def read_datetime(x):
return x == datetime(1976, 2, 15, 13, 6, 22)
@xw.func
def write_datetime():
return datetime(1976, 2, 15, 13, 6, 23)
@xw.func
def read_horizontal_list(x):
return x == [1., 2.]
@xw.func
def write_horizontal_list():
return [1., 2.]
@xw.func
def read_vertical_list(x):
return x == [1., 2.]
@xw.func
def write_vertical_list():
return [[1.], [2.]]
@xw.func
def read_2dlist(x):
return x == [[1., 2.], [3., 4.]]
@xw.func
def write_2dlist():
return [[1., 2.], [3., 4.]]
# Keyword args on default converters
@xw.func
@xw.arg('x', ndim=1)
def read_ndim1(x):
return x == [2.]
@xw.func
@xw.arg('x', ndim=2)
def read_ndim2(x):
return x == [[2.]]
@xw.func
@xw.arg('x', transpose=True)
def read_transpose(x):
return x == [[1., 3.], [2., 4.]]
@xw.func
@xw.ret(transpose=True)
def write_transpose():
return [[1., 2.], [3., 4.]]
@xw.func
@xw.arg('x', dates=date)
def read_dates_as1(x):
return x == [[1., date(2015, 1, 13)], [date(2000, 12, 1), 4.]]
@xw.func
@xw.arg('x', dates=date)
def read_dates_as2(x):
return x == date(2005, 1, 15)
@xw.func
@xw.arg('x', dates=datetime)
def read_dates_as3(x):
return x == [[1., datetime(2015, 1, 13)], [datetime(2000, 12, 1), 4.]]
@xw.func
@xw.arg('x', empty='empty')
def read_empty_as(x):
return x == [[1., 'empty'], ['empty', 4.]]
if sys.version_info >= (2, 7):
# assert_dict_equal isn't available on nose for PY 2.6
# Dicts
@xw.func
@xw.arg('x', dict)
def read_dict(x):
return dict_equal(x, {'a': 1., 'b': 'c'})
@xw.func
@xw.arg('x', dict, transpose=True)
def read_dict_transpose(x):
return dict_equal(x, {1.0: 'c', 'a': 'b'})
@xw.func
def write_dict():
return {'a': 1., 'b': 'c'}
# Numpy Array
if np:
@xw.func
@xw.arg('x', np.array)
def read_scalar_nparray(x):
return nparray_equal(x, np.array(1.))
@xw.func
@xw.arg('x', np.array)
def read_empty_nparray(x):
return nparray_equal(x, np.array(np.nan))
@xw.func
@xw.arg('x', np.array)
def read_horizontal_nparray(x):
return nparray_equal(x, np.array([1., 2.]))
@xw.func
@xw.arg('x', np.array)
def read_vertical_nparray(x):
return nparray_equal(x, np.array([1., 2.]))
@xw.func
@xw.arg('x', np.array)
def read_date_nparray(x):
return nparray_equal(x, np.array(datetime(2000, 12, 20)))
# Keyword args on Numpy arrays
@xw.func
@xw.arg('x', np.array, ndim=1)
def read_ndim1_nparray(x):
return nparray_equal(x, np.array([2.]))
@xw.func
@xw.arg('x', np.array, ndim=2)
def read_ndim2_nparray(x):
return nparray_equal(x, np.array([[2.]]))
@xw.func
@xw.arg('x', np.array, transpose=True)
def read_transpose_nparray(x):
return nparray_equal(x, np.array([[1., 3.], [2., 4.]]))
@xw.func
@xw.ret(transpose=True)
def write_transpose_nparray():
return np.array([[1., 2.], [3., 4.]])
@xw.func
@xw.arg('x', np.array, dates=date)
def read_dates_as_nparray(x):
return nparray_equal(x, np.array(date(2000, 12, 20)))
@xw.func
@xw.arg('x', np.array, empty='empty')
def read_empty_as_nparray(x):
return nparray_equal(x, np.array('empty'))
@xw.func
def write_np_scalar():
return np.float64(2)
# Pandas Series
if pd:
@xw.func
@xw.arg('x', pd.Series, header=False, index=False)
def read_series_noheader_noindex(x):
return series_equal(x, pd.Series([1., 2.]))
@xw.func
@xw.arg('x', pd.Series, header=False, index=True)
def read_series_noheader_index(x):
return series_equal(x, pd.Series([1., 2.], index=[10., 20.]))
@xw.func
@xw.arg('x', pd.Series, header=True, index=False)
def read_series_header_noindex(x):
return series_equal(x, pd.Series([1., 2.], name='name'))
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
def read_series_header_named_index(x):
return series_equal(x, pd.Series([1., 2.], name='name', index=pd.Index([10., 20.], name='ix')))
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
def read_series_header_nameless_index(x):
return series_equal(x, pd.Series([1., 2.], name='name', index=[10., 20.]))
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
def read_series_header_nameless_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return series_equal(x, pd.Series([1., 2.], name='name', index=ix))
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
def read_series_header_named_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]], names=['ix1', 'ix2'])
return series_equal(x, pd.Series([1., 2.], name='name', index=ix))
@xw.func
@xw.arg('x', pd.Series, header=False, index=2)
def read_series_noheader_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return series_equal(x, pd.Series([1., 2.], index=ix))
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_noheader_noindex():
return pd.Series([1., 2.])
@xw.func
@xw.ret(pd.Series, index=True)
def write_series_noheader_index():
return pd.Series([1., 2.], index=[10., 20.])
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_header_noindex():
return pd.Series([1., 2.], name='name')
@xw.func
def write_series_header_named_index():
return pd.Series([1., 2.], name='name', index=pd.Index([10., 20.], name='ix'))
@xw.func
@xw.ret(pd.Series, index=True, header=True)
def write_series_header_nameless_index():
return pd.Series([1., 2.], name='name', index=[10., 20.])
@xw.func
@xw.ret(pd.Series, header=True, index=2)
def write_series_header_nameless_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return pd.Series([1., 2.], name='name', index=ix)
@xw.func
@xw.ret(pd.Series, header=True, index=2)
def write_series_header_named_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]], names=['ix1', 'ix2'])
return pd.Series([1., 2.], name='name', index=ix)
@xw.func
@xw.ret(pd.Series, header=False, index=2)
def write_series_noheader_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return pd.Series([1., 2.], index=ix)
@xw.func
@xw.arg('x', pd.Series)
def read_timeseries(x):
return series_equal(x, pd.Series([1.5, 2.5], name='ts', index=[datetime(2000, 12, 20), datetime(2000, 12, 21)]))
@xw.func
@xw.ret(pd.Series)
def write_timeseries():
return pd.Series([1.5, 2.5], name='ts', index=[datetime(2000, 12, 20), datetime(2000, 12, 21)])
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_nan():
return pd.Series([1, np.nan, 3])
# Pandas DataFrame
if pd:
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=False)
def read_df_0header_0index(x):
return frame_equal(x, pd.DataFrame([[1., 2.], [3., 4.]]))
@xw.func
@xw.ret(pd.DataFrame, index=False, header=False)
def write_df_0header_0index():
return | pd.DataFrame([[1., 2.], [3., 4.]]) | pandas.DataFrame |
from bs4 import BeautifulSoup
import logging
import pandas as pd
import re
import requests
from urllib.parse import urljoin
logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
def get_html(url):
return requests.get(url).text
class CongressCrawler:
def __init__(self):
self.base_url = "https://www.camara.leg.br/"
self.congress = []
self.search_url = (
self.base_url + "deputados/quem-sao/resultado?search=&partido=&uf=&sexo="
)
def get_congressperson_data(self, url):
try:
soup = BeautifulSoup(get_html(url), "html.parser")
name = soup.find(id="nomedeputado").contents[0]
party_state = soup.find(class_="foto-deputado__partido-estado").contents[0]
party = re.findall(r".+?(?=\s-)", party_state)[0]
email = soup.find(class_="email").contents[0]
congressperson = {
"name": name,
"party": party,
"email": email,
}
return congressperson
except Exception:
logging.exception(f"failed at {url}")
return
def get_congressperson_href(self, soup):
for link in soup.find_all(href=re.compile(r"/deputados/\d.*")):
yield urljoin(self.base_url, link.get("href"))
def get_congress_by_page(self, url):
logging.info(f"page: {url}")
soup = BeautifulSoup(get_html(url), "html.parser")
for congressperson_url in self.get_congressperson_href(soup):
congressperson = self.get_congressperson_data(congressperson_url)
if congressperson:
self.congress.append(congressperson)
logging.info(
f'congressperson: {congressperson_url} - email: {congressperson["email"]} - party: {congressperson["party"]}'
)
def get_total_congress(self, legislature):
soup = BeautifulSoup(
get_html(self.search_url + "&legislatura=" + legislature),
"html.parser",
)
pfound = soup.find(text=re.compile(r"\d+\sencontrados"))
total = re.findall(r"\d{3,}", pfound)[0]
return total
def get_current_legislature(self):
soup = BeautifulSoup(get_html(self.base_url), "html.parser")
found = soup.find(text=re.compile(r"\d.*\sLegislatura"))
legislature = re.findall(r"\d\d", found)[0]
return legislature
def run(self):
try:
legislature = self.get_current_legislature()
total = self.get_total_congress(legislature)
pages = round(int(total) / 25) + 1
for i in range(1, pages):
self.get_congress_by_page(
self.search_url
+ "&legislatura="
+ legislature
+ "&pagina="
+ str(i)
)
except Exception:
logging.exception("global failure")
finally:
df = | pd.DataFrame(self.congress) | pandas.DataFrame |
#!/usr/bin/env python3
"""Script for exporting tensorboard logs to csv."""
import re
import numpy as np
from collections import defaultdict
import pandas as pd
from tensorboard.backend.event_processing.event_multiplexer import EventMultiplexer
class TensorboardDataHelper():
"""Class to help extrat summary values from the end of multiple runs"""
def __init__(self, logdir, tags=None, tag_filter_fn=None, n_values=1, run_filter_fn=lambda a: True, keep_nans=False):
if tags is None and tag_filter_fn is None:
raise ValueError('Either tags or tag_filter_fn must be defined!')
if tags is not None and tag_filter_fn is not None:
raise ValueError(
'Only one of tags or tag_filter_fn can be defined at once!')
self.logdir = logdir
self.n_values = n_values
self.run_filter_fn = run_filter_fn
self.keep_nans = keep_nans
if tag_filter_fn is not None:
self.tag_filter_fn = tag_filter_fn
else:
self.selected_tags = tags
self.tag_filter_fn = lambda a: a in self.selected_tags
self.event_multiplexer = EventMultiplexer().AddRunsFromDirectory(logdir)
self.reload()
def reload(self):
self.event_multiplexer.Reload()
self.runs_and_scalar_tags = {run: values['scalars']
for run, values in self.event_multiplexer.Runs().items()
if self.run_filter_fn(run)
}
def get_matching_runs(self):
return self.runs_and_scalar_tags.keys()
def _get_last_events(self, list_of_events):
"""Get last scalars in terms of training step"""
def get_training_step(event):
return event.step
events = sorted(list_of_events, key=get_training_step)
if not self.keep_nans:
events = list(filter(lambda ev: np.isfinite(ev.value), events))
return events[-self.n_values:]
def generate_directory_of_values(self):
# Default value of new key is a new dictionary
# makes code look nicer :)
output = defaultdict(dict)
for run, tags in self.runs_and_scalar_tags.items():
for tag in tags:
# skip unwanted tags
if not self.tag_filter_fn(tag):
continue
all_scalars = self.event_multiplexer.Scalars(run, tag)
for ev in self._get_last_events(all_scalars):
output[(run, ev.step)][tag] = ev.value
return output
def generate_pandas_dataframe(self):
dict_of_values = self.generate_directory_of_values()
df = | pd.DataFrame.from_dict(dict_of_values, orient='index') | pandas.DataFrame.from_dict |
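# A hypothetical usage sketch for TensorboardDataHelper (the log directory and
# tag names are assumptions, not taken from this script):
# helper = TensorboardDataHelper("./runs", tags=["loss", "accuracy"], n_values=3)
# df = helper.generate_pandas_dataframe()
# df.to_csv("tensorboard_summary.csv")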
import tempfile
import pytest
import pandas as pd
import numpy as np
import pytz
from eemeter.modeling.models.billing import BillingElasticNetCVModel
from eemeter.modeling.formatters import ModelDataBillingFormatter
from eemeter.structures import EnergyTrace
@pytest.fixture
def trace():
index = pd.date_range('6/6/2012','6/6/2013',freq='M',
tz=pytz.UTC)
data = pd.DataFrame(
{
"value": [1,] * 12,
"estimated": [False,] * 12
}, index=index, columns=['value', 'estimated'])
return EnergyTrace(
interpretation="NATURAL_GAS_CONSUMPTION_SUPPLIED",
unit="THERM", data=data)
def test_basic_usage(trace, monkeypatch_temperature_data,
mock_isd_weather_source):
formatter = ModelDataBillingFormatter()
model = BillingElasticNetCVModel(65, 65)
formatted_input_data = formatter.create_input(trace,
mock_isd_weather_source)
outputs = model.fit(formatted_input_data)
assert 'upper' in outputs
assert 'lower' in outputs
assert 'n' in outputs
assert 'r2' in outputs
assert 'rmse' in outputs
assert 'cvrmse' in outputs
assert 'model_params' in outputs
index = pd.date_range(
'2011-01-01', freq='H', periods=365 * 24, tz=pytz.UTC)
formatted_predict_data = formatter.create_demand_fixture(index,
mock_isd_weather_source)
outputs, variance = model.predict(formatted_predict_data, summed=False)
assert outputs.shape == (365,)
assert variance > 0
index = | pd.date_range('2011-01-01', freq='D', periods=365, tz=pytz.UTC) | pandas.date_range |
"""
Evaluation of predictions againsts given dataset (in TXT format the same as training).
We expect that the predictions are in single folder and image names in dataset are the same
python evaluate.py \
--path_dataset ../model_data/VOC_2007_train.txt \
--path_results ../results \
--confidence 0.5 \
--iou 0.5 \
--visual
It generates
* statistic per image (mean over all classes)
* statistic per class (mean over all images)
See:
- https://github.com/rafaelpadilla/Object-Detection-Metrics
"""
import os
import sys
import argparse
import logging
from functools import partial
from pathos.multiprocessing import ProcessPool
import tqdm
import pandas as pd
sys.path += [os.path.abspath('.'), os.path.abspath('..')]
from keras_yolo3.utils import check_params_path, nb_workers, image_open, update_path
from keras_yolo3.model import compute_detect_metrics
from keras_yolo3.visual import draw_bounding_box
CSV_NAME_RESULTS_IMAGES = 'detection-results_conf=%.2f_iou=%.2f_stat-images.csv'
CSV_NAME_RESULTS_CLASSES = 'detection-results_conf=%.2f_iou=%.2f_stat-classes.csv'
ANNOT_COLUMNS = ('xmin', 'ymin', 'xmax', 'ymax', 'class')
# DETECT_COLUMNS = ('xmin', 'ymin', 'xmax', 'ymax', 'class', 'confidence')
TEMP_IMAGE_NAME = '%s_visual.jpg'
def parse_params():
# class YOLO defines the default value, so suppress any default HERE
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument('-d', '--path_dataset', type=str, required=True,
help='path to the dataset, with single instance per line')
parser.add_argument('-r', '--path_results', type=str, required=True,
help='path to the predictions')
parser.add_argument('-c', '--confidence', type=float, required=False, default=0.5,
help='detection confidence score')
parser.add_argument('--iou', type=float, required=False, default=0.5,
help='intersection over union')
parser.add_argument('--nb_jobs', type=float, help='number of parallel processes',
default=0.9, required=False)
parser.add_argument('--visual', default=False, action='store_true',
help='visualize annot & predict')
arg_params = vars(parser.parse_args())
arg_params = check_params_path(arg_params)
logging.debug('PARAMETERS: \n %s', repr(arg_params))
return arg_params
def draw_export_bboxes(img_path, path_out, bboxes_anot, bboxes_pred):
img_name, _ = os.path.splitext(os.path.basename(img_path))
image = image_open(img_path)
for bb in bboxes_anot:
image = draw_bounding_box(image, bb[4], bb[:4], swap_xy=True,
color=(0, 255, 0), thickness=2)
for bb in bboxes_pred:
image = draw_bounding_box(image, bb[4], bb[:4], swap_xy=True,
color=(255, 0, 0), thickness=2)
name_visu = TEMP_IMAGE_NAME % img_name
path_visu = os.path.join(update_path(path_out), name_visu)
image.save(path_visu)
return path_visu
def eval_image(line, path_results, thr_confidence=0.5, thr_iou=0.5, path_out=None):
line_elems = line.strip().split()
img_path = line_elems[0]
img_name, _ = os.path.splitext(os.path.basename(img_path))
path_pred = os.path.join(path_results, '%s.csv' % img_name)
if not os.path.isfile(path_pred):
return None
boxes = [list(map(int, el.split(','))) for el in line_elems[1:]]
df_annot = pd.DataFrame(boxes, columns=list(ANNOT_COLUMNS))
if df_annot.empty:
df_annot = pd.DataFrame(columns=ANNOT_COLUMNS)
df_preds = pd.read_csv(path_pred, index_col=None)
if df_preds.empty:
df_preds = | pd.DataFrame(columns=ANNOT_COLUMNS) | pandas.DataFrame |
import pandas as pd
from sodapy import Socrata
import datetime
import definitions
# global variables for main data:
hhs_data, test_data, nyt_data_us, nyt_data_state, max_hosp_date = [],[],[],[],[]
"""
get_data()
Fetches data from API, filters, cleans, and combines with provisional.
After running, global variables are filled for use in subsequent functions
"""
def get_data():
global nyt_data_us
global nyt_data_state
global test_data
global hhs_data
global max_hosp_date
nyt_data_us = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv")
nyt_data_state = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv")
client = Socrata("healthdata.gov", None)
results = client.get("g62h-syeh", limit=2000000)
test_results = client.get("j8mb-icvb", limit=2000000)
print("LOG: Fetched all raw data")
# Filter data to get columns of interest
hhs_data = pd.DataFrame.from_records(results)[['state', 'date', 'inpatient_beds_used_covid']]
hhs_data.inpatient_beds_used_covid = hhs_data.inpatient_beds_used_covid.fillna(0)
hhs_data = hhs_data.astype({'inpatient_beds_used_covid': 'int32'})
test_data = pd.DataFrame.from_records(test_results)[['state', 'date', 'overall_outcome', 'new_results_reported']]
test_data.new_results_reported = test_data.new_results_reported.fillna(0)
test_data = test_data.astype({'new_results_reported': 'int32'})
print("LOG: Filtered Data")
# For provisional data, gets days since most recent update of HHS time series
max_date = hhs_data.date.max()
max_hosp_date = max_date
provisional = client.get("4cnb-m4rz", limit=2000000, where=f"update_date > '{max_date}'")
hhs_provisional = pd.DataFrame.from_records(provisional)[['update_date', 'archive_link']]
hhs_provisional.update_date = hhs_provisional.update_date.apply(lambda x: x[:10])
hhs_provisional.update_date = pd.to_datetime(hhs_provisional.update_date)
# Gets last archive of every day
group = hhs_provisional.groupby(['update_date'])
hhs_provisional = group.last()
# Add provisional data to HHS data
frames = []
for a in hhs_provisional.iterrows():
date = a[0]
url = a[1].item()['url']
df = pd.read_csv(url)[['state', 'inpatient_beds_used_covid']]
df['date']=date
if date > pd.Timestamp(max_date): # Avoids double counting if provisional update came after real update
frames.append(df)
frames.append(hhs_data)
hhs_data = (pd.concat(frames))
print("LOG: Added HHS Provisional data")
# Make date columns in proper format
# hhs_data.date = hhs_data.date.apply(lambda x: x[:10])
hhs_data.date= pd.to_datetime(hhs_data.date)
# hhs_data.to_csv("../data/hospitalizations.csv")
print("LOG: Wrote HHS data to CSV")
test_data.date = test_data.date.apply(lambda x: x[:10])
test_data.date = pd.to_datetime(test_data.date)
nyt_data_us.date = pd.to_datetime(nyt_data_us.date)
nyt_data_state.date = pd.to_datetime(nyt_data_state.date)
print("LOG: Done getting data")
"""
get_state_cases
Creates dataframe of time series date and cases for given state
inputs:
state_codes: List of 2-letter codes of states to query
start_date (pd.Timestamp): starting date, defaults to 1-1-2020
end_date (pd.Timestamp): ending date, defaults to today
returns:
    df with 'date' and 'cases'
"""
def get_state_cases(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)][:]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.cases.sum() / states_population * 1000000
else:
case_sum = day_data.cases.sum()
newRow = {'date': curr_date, 'cases': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
def get_us_cases(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
us_data = nyt_data_us[(nyt_data_us.date >= start_date) & (nyt_data_us.date <= end_date)]
return us_data[['date', 'cases']]
"""
get_state_deaths
Same as above, deaths
"""
def get_state_deaths(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.deaths.sum() / states_population * 1000000
else:
case_sum = day_data.deaths.sum()
newRow = {'date': curr_date, 'deaths': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
def get_us_deaths(start_date = | pd.Timestamp(2020,1,1) | pandas.Timestamp |
import os
from pandas import DataFrame, read_csv
from networkx import DiGraph, write_gpickle, read_gpickle
from memory_profiler import profile
from app.decorators.number_decorators import fmt_n
from app.job import Job
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
DATE = os.getenv("DATE", default="2020-01-23")
TWEET_MIN = int(os.getenv("TWEET_MIN", default="1")) # CHANGED
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true")
#GRAPH_LIMIT = os.getenv("GRAPH_LIMIT")
GRAPH_BATCH_SIZE = int(os.getenv("GRAPH_BATCH_SIZE", default="10000"))
GRAPH_DESTRUCTIVE = (os.getenv("GRAPH_DESTRUCTIVE", default="false") == "true")
#@profile
def load_graph(local_graph_filepath):
print("LOADING GRAPH...")
graph = read_gpickle(local_graph_filepath)
print(type(graph), fmt_n(graph.number_of_nodes()), fmt_n(graph.number_of_edges()))
return graph
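# Sketch of the inverse of load_graph() (not called below): writes a graph back out
# using the same gpickle conventions, so the load/save round trip is explicit.
def save_graph(graph, local_graph_filepath):
    print("SAVING GRAPH...")
    write_gpickle(graph, local_graph_filepath)
    print(type(graph), fmt_n(graph.number_of_nodes()), fmt_n(graph.number_of_edges()))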
if __name__ == "__main__":
print("------------------------")
print("GRAPHER...")
print(" DATE:", DATE)
print(" TWEET_MIN:", TWEET_MIN)
print(" LIMIT:", LIMIT)
print(" BATCH_SIZE:", BATCH_SIZE)
print(" DESTRUCTIVE:", DESTRUCTIVE)
#print(" GRAPH_LIMIT:", GRAPH_LIMIT)
print(" GRAPH_BATCH_SIZE:", GRAPH_BATCH_SIZE)
print(" GRAPH_DESTRUCTIVE:", GRAPH_DESTRUCTIVE)
print("------------------------")
storage = FileStorage(dirpath=f"daily_active_friend_graphs_v4/{DATE}/tweet_min/{TWEET_MIN}")
tweets_csv_filepath = os.path.join(storage.local_dirpath, "tweets.csv")
bq_service = BigQueryService()
job = Job()
#
# LOAD TWEETS
# tweet_id, text, screen_name, bot, created_at
# TODO: de-dup RTs so the model will only train/test on a single RT status text (PREVENT OVERFITTING)
if os.path.exists(tweets_csv_filepath) and not DESTRUCTIVE:
print("LOADING TWEETS...")
statuses_df = read_csv(tweets_csv_filepath)
else:
job.start()
print("DOWNLOADING TWEETS...")
statuses = []
for row in bq_service.fetch_daily_active_tweeter_statuses_for_model_training(date=DATE, tweet_min=TWEET_MIN, limit=LIMIT):
statuses.append(dict(row))
job.counter += 1
if job.counter % BATCH_SIZE == 0:
job.progress_report()
job.end()
statuses_df = DataFrame(statuses)
del statuses
statuses_df.to_csv(tweets_csv_filepath)
print("STATUSES:", fmt_n(len(statuses_df)))
#
# MAKE GRAPH
local_nodes_csv_filepath = os.path.join(storage.local_dirpath, "active_nodes.csv")
local_graph_csv_filepath = os.path.join(storage.local_dirpath, "active_edge_graph.csv") #CHANGED
if os.path.exists(local_nodes_csv_filepath) and os.path.exists(local_graph_csv_filepath) and not GRAPH_DESTRUCTIVE:
nodes_df = | read_csv(local_nodes_csv_filepath) | pandas.read_csv |
# Title: Weather Data Aggregator
# Description: Aggregates data from the weather station on Cockcroft from the OnCall API.
# Author: <NAME>
# Date: 17/12/2020
# Version: 1.0
# Import libraries
import pandas as pd
from pandas import json_normalize
import json
import requests
from datetime import datetime, timedelta
from app import csvDump
import os
# Variable Declarations
URL = "http://172.16.17.32/panasense.oncall.finestra.live/api"
ENDPOINT = "/dailypollarchive"
DEVICE = "0FF00FFA2DBB4A029D2902CD33A43364" # Cockcroft Weather Station GUID
ACTION_IDENT_TEMP = "AD7396F9F28D4DA798F0370934C368A9" # Air Temperature in C endpoint GUID
ACTION_IDENT_HUM = "8C5DAA6DB83E4E5C8310A27F6E549527" # Relative Humidity endpoint GUID
ACTION_IDENT_PRE = "E589656878094D03A1554197DC90B5B5" # Pressure endpoint GUID
ACTION_IDENT_RF_MM = "90828B8769E74A5B9F74761335CB1676" # Rainfall in mm endpoint GUID
ACTION_IDENT_WS_MS = "B04BE963E74F467A875C534B90BE05A0" # Windspeed in ms endpoint GUID
ACTION_IDENT_WD_D = "752FC7FCFE584FBF980E2FFCAD991D87" # Wind direction endpoint GUID
ACTION_IDENT_SOL_KWM2 = "4EF9B920C87444939DE8069D37ECA200" # Solar Radiation endpoint GUID
START = "2021-03-01T00:00:00"
END = "2021-09-08T23:00:00"
dropCols = ['RECID','Limit','DeviceGUID','ActionGUID','PollType','RV']
# POST credentials
with open("./config/.onCallAPI.json") as f:
accessToken = json.load(f)
API_KEY = accessToken['TOKEN']
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S" # Date format for parsing datetime returned by OnCall API
sd = pd.DataFrame()
sdH = pd.DataFrame()
sdP = pd.DataFrame()
sdRF = pd.DataFrame()
sdWS = pd.DataFrame()
sdWSD = pd.DataFrame()
sdS = pd.DataFrame()
# Function declaration
def RESAMPLE_DATA(df, RESAMPLE_PERIOD = '60min'):
#df.index.names = ['Datetime']
df = df.resample(RESAMPLE_PERIOD).mean() # Resample data to an average over a defined period
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq=RESAMPLE_PERIOD))
return df
def RENAME_COLUMNS(df, valName, inplaceB = True):
df.rename(columns={"PollTimeStamp": "Datetime", "VarValue": valName}, inplace = inplaceB) # Rename used columns to more appropriate names
return df
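# Sketch of the fetch-and-clean pattern repeated for every variable in the main
# loop below, factored into one helper. The action GUID and column label are
# passed in; the endpoint is assumed to behave exactly like the per-variable
# blocks that follow. Returns None when the request fails or comes back empty.
def FETCH_POLL_DATA(action_guid, value_label, step_start, step_end):
    request_url = URL + ENDPOINT + "/" + DEVICE + "/" + action_guid + "?start=" + step_start + "&end=" + step_end + "&api_key=" + API_KEY
    response = requests.get(request_url)
    if (response.status_code != 200):
        return None
    frame = json_normalize(response.json())
    if frame.empty:
        return None
    frame = RENAME_COLUMNS(frame, value_label)
    frame.drop(dropCols, axis=1, inplace=True)
    frame['Datetime'] = pd.to_datetime(frame['Datetime'])
    return frame.set_index('Datetime')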
# Main body
start = datetime.strptime(START, DATE_FORMAT).date()
end = datetime.strptime(END, DATE_FORMAT).date()
step = timedelta(days=1) # Sets the step gap for getting data in specific increments
while start < end:
stepStart = start.strftime(DATE_FORMAT)
start += step
stepEnd = start.strftime(DATE_FORMAT)
print('Step Start ' + str(stepStart) + ' | Step End ' + str(stepEnd))
# Fetch Temperature data
REQUEST_URL = URL + ENDPOINT + "/" + DEVICE + "/" + ACTION_IDENT_TEMP + "?start=" + stepStart + "&end=" + stepEnd + "&api_key=" + API_KEY # API URL for temperature data
response = requests.get(REQUEST_URL)
#print("Temperature Endpoint Status " + str(response.status_code))
    if (response.status_code != 200): break # Break the loop if the returned status code is not HTTP 200
    jsonLoad = response.json() # Load the received JSON file from the request
sensorData = json_normalize(jsonLoad) # Convert the JSONs into pandas dataframes
if not (sensorData.empty):
#print('Valid content returned')
sensorData = RENAME_COLUMNS(sensorData, "Temperature in C") # Rename used columns to more appropriate names
sensorData.drop(dropCols, axis=1, inplace=True) # Drop irrelevant variables
sensorData['Datetime'] = pd.to_datetime(sensorData['Datetime'])
sensorData = sensorData.set_index('Datetime')
#csvDump("Temperature", RESAMPLE_DATA(sensorData), index_set = True, index_label_usr = "Datetime")
else:
print("Temperature Response Empty, skipping...")
# Fetch Humidity data
REQUEST_URL = URL + ENDPOINT + "/" + DEVICE + "/" + ACTION_IDENT_HUM + "?start=" + stepStart + "&end=" + stepEnd + "&api_key=" + API_KEY # API URl for humidity data
response = requests.get(REQUEST_URL)
#print("Humidity Endpoint Status " + str(response.status_code))
    if (response.status_code != 200): break # Break the loop if the returned status code is not HTTP 200
    jsonLoad = response.json() # Load the received JSON file from the request
sensorDataHum = json_normalize(jsonLoad) # Convert the JSONs into pandas dataframes
if not (sensorDataHum.empty):
sensorDataHum = RENAME_COLUMNS(sensorDataHum, "Humidity in %") # Rename used columns to more appropriate names
sensorDataHum.drop(dropCols, axis=1, inplace=True) # Drop irrelevant variables
sensorDataHum['Datetime'] = pd.to_datetime(sensorDataHum['Datetime'])
sensorDataHum = sensorDataHum.set_index('Datetime')
#csvDump("Humidity", RESAMPLE_DATA(sensorDataHum), index_set = True, index_label_usr = "Datetime")
else:
print("Humidity Response Empty, skipping...")
# Fetch Pressure data
REQUEST_URL = URL + ENDPOINT + "/" + DEVICE + "/" + ACTION_IDENT_PRE + "?start=" + stepStart + "&end=" + stepEnd + "&api_key=" + API_KEY # API URl for humidity data
response = requests.get(REQUEST_URL)
#print("Pressure Endpoint Status " + str(response.status_code))
    if (response.status_code != 200): break # Break the loop if the returned status code is not HTTP 200
    jsonLoad = response.json() # Load the received JSON file from the request
sensorDataPre = json_normalize(jsonLoad) # Convert the JSONs into pandas dataframes
if not (sensorDataPre.empty):
sensorDataPre = RENAME_COLUMNS(sensorDataPre, "Pressure in mBar") # Rename used columns to more appropriate names
sensorDataPre.drop(dropCols, axis=1, inplace=True) # Drop irrelevant variables
sensorDataPre['Datetime'] = pd.to_datetime(sensorDataPre['Datetime'])
sensorDataPre = sensorDataPre.set_index('Datetime')
#csvDump("Pressure", RESAMPLE_DATA(sensorDataPre), index_set = True, index_label_usr = "Datetime")
else:
print("Pressure Response Empty, skipping...")
# Fetch Rainfall mm data
REQUEST_URL = URL + ENDPOINT + "/" + DEVICE + "/" + ACTION_IDENT_RF_MM + "?start=" + stepStart + "&end=" + stepEnd + "&api_key=" + API_KEY # API URl for humidity data
response = requests.get(REQUEST_URL)
#print("Rainfall Endpoint Status " + str(response.status_code))
    if (response.status_code != 200): break # Break the loop if the returned status code is not HTTP 200
    jsonLoad = response.json() # Load the received JSON file from the request
sensorDataRF = json_normalize(jsonLoad) # Convert the JSONs into pandas dataframes
if not (sensorDataRF.empty):
sensorDataRF = RENAME_COLUMNS(sensorDataRF, "Rainfall in mm") # Rename used columns to more appropriate names
sensorDataRF.drop(dropCols, axis=1, inplace=True) # Drop irrelevant variables
sensorDataRF['Datetime'] = pd.to_datetime(sensorDataRF['Datetime'])
sensorDataRF = sensorDataRF.set_index('Datetime')
#csvDump("Rainfall", RESAMPLE_DATA(sensorDataRF), index_set = True, index_label_usr = "Datetime")
else:
print("Rainfall Response Empty, skipping...")
# Fetch Windspeed m/s data
REQUEST_URL = URL + ENDPOINT + "/" + DEVICE + "/" + ACTION_IDENT_WS_MS + "?start=" + stepStart + "&end=" + stepEnd + "&api_key=" + API_KEY # API URl for humidity data
response = requests.get(REQUEST_URL)
#print("Windspeed Endpoint Status " + str(response.status_code))
    if (response.status_code != 200): break # Break the loop if the returned status code is not HTTP 200
    jsonLoad = response.json() # Load the received JSON file from the request
sensorDataWS = json_normalize(jsonLoad) # Convert the JSONs into pandas dataframes
if not (sensorDataWS.empty):
sensorDataWS = RENAME_COLUMNS(sensorDataWS, "Windspeed in ms") # Rename used columns to more appropriate names
sensorDataWS.drop(dropCols, axis=1, inplace=True) # Drop irrelevant variables
sensorDataWS['Datetime'] = pd.to_datetime(sensorDataWS['Datetime'])
sensorDataWS = sensorDataWS.set_index('Datetime')
#csvDump("Windspeed", RESAMPLE_DATA(sensorDataWS), index_set = True, index_label_usr = "Datetime")
else:
print("Windspeed Response Empty, skipping...")
# Fetch Windspeed direction in degrees data
REQUEST_URL = URL + ENDPOINT + "/" + DEVICE + "/" + ACTION_IDENT_WD_D + "?start=" + stepStart + "&end=" + stepEnd + "&api_key=" + API_KEY # API URl for humidity data
response = requests.get(REQUEST_URL)
#print("Wind direction Endpoint Status " + str(response.status_code))
    if (response.status_code != 200): break # Break the loop if the returned status code is not HTTP 200
    jsonLoad = response.json() # Load the received JSON file from the request
sensorDataWSD = | json_normalize(jsonLoad) | pandas.json_normalize |
"""
Enrich Stocks and ETF data with different indicators and generates a CSV file for analysis
"""
import argparse
from datetime import datetime
from pathlib import Path
import pandas as pd
from common.analyst import fetch_data_from_cache
from common.filesystem import output_dir
from common.market import load_all_tickers
from common.subprocess_runner import run_cmd
from common.symbols import macro_etfs
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-v",
"--view-in-browser",
action="store_true",
default=False,
help="Open dTale in browser",
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
view_in_browser = args.view_in_browser
stock_tickers = load_all_tickers()
etf_tickers = macro_etfs.keys()
print(f"Analysing {len(stock_tickers)} stocks and {len(etf_tickers)} etfs")
stocks_db = filter(
lambda val: val,
[fetch_data_from_cache(stock, is_etf=False) for stock in stock_tickers],
)
etfs_db = filter(
lambda val: val,
[fetch_data_from_cache(etf, is_etf=True) for etf in etf_tickers],
)
combined_db = list(stocks_db) + list(etfs_db)
file_path = "{}/{}-data.csv".format(
output_dir(), datetime.now().strftime("%Y-%m-%d")
)
scanner_df = | pd.DataFrame(combined_db, copy=True) | pandas.DataFrame |
import unittest
import pandas as pd
import numpy as np
from autopandas_v2.ml.featurization.featurizer import RelationGraph
from autopandas_v2.ml.featurization.graph import GraphEdge, GraphEdgeType, GraphNodeType, GraphNode
from autopandas_v2.ml.featurization.options import GraphOptions
get_node_type = GraphNodeType.get_node_type
class TestRelationGraphFeaturizer(unittest.TestCase):
def test_basic_max(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
input_20 = GraphNode("I0", '[2,0]', get_node_type(input_df.iat[2, 0]))
input_21 = GraphNode("I0", '[2,1]', get_node_type(input_df.iat[2, 1]))
output_df = pd.DataFrame([[2, 3]])
output_00 = GraphNode("O0", '[0,0]', get_node_type(output_df.iat[0, 0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output_df.iat[0, 1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output_df)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_20, GraphEdgeType.ADJACENCY),
GraphEdge(input_20, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_11, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# equality edges
equality_edges = [
GraphEdge(input_10, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_20, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_00, GraphEdgeType.EQUALITY), # redundant
GraphEdge(input_11, output_01, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_max_series(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
input_20 = GraphNode("I0", '[2,0]', get_node_type(input_df.iat[2, 0]))
input_21 = GraphNode("I0", '[2,1]', get_node_type(input_df.iat[2, 1]))
output = pd.DataFrame.max(input_df)
output_00 = GraphNode("O0", '[0,0]', get_node_type(output.iat[0]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output.iat[1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_20, GraphEdgeType.ADJACENCY),
GraphEdge(input_20, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_11, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# equality edges
equality_edges = [
GraphEdge(input_10, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_20, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_00, GraphEdgeType.EQUALITY), # redundant
GraphEdge(input_11, output_10, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_values(self):
input_df = pd.DataFrame([[1, 2], [3, 4]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
output = input_df.values
output_00 = GraphNode("O0", '[0,0]', get_node_type(output[0, 0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output[0, 1]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output[1, 0]))
output_11 = GraphNode("O0", '[1,1]', get_node_type(output[1, 1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY),
GraphEdge(output_10, output_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_01, output_11, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
GraphEdge(input_00, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_10, output_10, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_01, GraphEdgeType.EQUALITY),
GraphEdge(input_11, output_11, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_dict(self):
input_df = pd.DataFrame([[1, 2], [3, 4]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
output = {"A": [1, 3], "B": [2, 4]}
output_00 = GraphNode("O0", '[0,0]', get_node_type(output['A'][0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output['B'][0]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output['A'][1]))
output_11 = GraphNode("O0", '[1,1]', get_node_type(output['B'][1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY),
GraphEdge(output_10, output_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_01, output_11, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
GraphEdge(input_00, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_10, output_10, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_01, GraphEdgeType.EQUALITY),
GraphEdge(input_11, output_11, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_groupby_output(self):
input_df = pd.DataFrame({
"Name": ["Alice", "Bob", "Mallory", "Mallory", "Bob", "Mallory"],
"City": ["Seattle", "Seattle", "Portland", "Seattle", "Seattle", "Portland"]})
output = input_df.groupby("Name")
options = GraphOptions()
options.NODE_TYPES = True
options.ADJACENCY_EDGES = False
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
alice_nodes_in = [
GraphNode("I0", '[0,0]', GraphNodeType.STR)
]
alice_nodes_out = [
GraphNode("O0_0", '[0,0]', GraphNodeType.STR)
]
bob_nodes_in = [
GraphNode("I0", '[1,0]', GraphNodeType.STR),
GraphNode("I0", '[4,0]', GraphNodeType.STR)
]
bob_nodes_out = [
GraphNode("O0_1", '[0,0]', GraphNodeType.STR),
GraphNode("O0_1", '[1,0]', GraphNodeType.STR)
]
mallory_nodes_in = [
GraphNode("I0", '[2,0]', GraphNodeType.STR),
GraphNode("I0", '[3,0]', GraphNodeType.STR),
GraphNode("I0", '[5,0]', GraphNodeType.STR)
]
mallory_nodes_out = [
GraphNode("O0_2", '[0,0]', GraphNodeType.STR),
GraphNode("O0_2", '[1,0]', GraphNodeType.STR),
GraphNode("O0_2", '[2,0]', GraphNodeType.STR)
]
seattle_nodes_in = [
GraphNode("I0", '[0,1]', GraphNodeType.STR),
GraphNode("I0", '[1,1]', GraphNodeType.STR),
GraphNode("I0", '[3,1]', GraphNodeType.STR),
GraphNode("I0", '[4,1]', GraphNodeType.STR),
]
seattle_nodes_out = [
GraphNode("O0_0", '[0,1]', GraphNodeType.STR),
GraphNode("O0_1", '[0,1]', GraphNodeType.STR),
GraphNode("O0_2", '[1,1]', GraphNodeType.STR)
]
portland_nodes_in = [
GraphNode("I0", '[2,1]', GraphNodeType.STR),
GraphNode("I0", '[5,1]', GraphNodeType.STR)
]
portland_nodes_out = [
GraphNode("O0_2", '[0,1]', GraphNodeType.STR),
GraphNode("O0_2", '[2,1]', GraphNodeType.STR)
]
def check_edges(in_nodes, out_nodes):
for in_node in in_nodes:
for out_node in out_nodes:
edge = GraphEdge(in_node, out_node, GraphEdgeType.EQUALITY)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
check_edges(alice_nodes_in, alice_nodes_out)
check_edges(bob_nodes_in, bob_nodes_out)
check_edges(mallory_nodes_in, mallory_nodes_out)
check_edges(portland_nodes_in, portland_nodes_out)
check_edges(seattle_nodes_in, seattle_nodes_out)
def test_groupby_input(self):
df = pd.DataFrame({
"Name": ["Alice", "Bob", "Mallory", "Mallory", "Bob", "Mallory"],
"City": ["Seattle", "Seattle", "Portland", "Seattle", "Seattle", "Portland"]})
input_ = df.groupby("Name")
output = input_.count().reset_index()
options = GraphOptions()
options.NODE_TYPES = True
options.ADJACENCY_EDGES = False
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_], output)
rel_graph_edges = rel_graph.edges
alice_nodes_in = [
GraphNode("I0_0", '[0,0]', GraphNodeType.STR)
]
alice_nodes_out = [
GraphNode("O0", '[0,0]', GraphNodeType.STR)
]
bob_nodes_in = [
GraphNode("I0_1", '[0,0]', GraphNodeType.STR),
GraphNode("I0_1", '[1,0]', GraphNodeType.STR)
]
bob_nodes_out = [
GraphNode("O0", '[1,0]', GraphNodeType.STR)
]
mallory_nodes_in = [
GraphNode("I0_2", '[0,0]', GraphNodeType.STR),
GraphNode("I0_2", '[1,0]', GraphNodeType.STR),
GraphNode("I0_2", '[2,0]', GraphNodeType.STR)
]
mallory_nodes_out = [
GraphNode("O0", '[2,0]', GraphNodeType.STR)
]
def check_edges(in_nodes, out_nodes):
for in_node in in_nodes:
for out_node in out_nodes:
edge = GraphEdge(in_node, out_node, GraphEdgeType.EQUALITY)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
check_edges(alice_nodes_in, alice_nodes_out)
check_edges(bob_nodes_in, bob_nodes_out)
check_edges(mallory_nodes_in, mallory_nodes_out)
def test_idx_multi(self):
tuples = [("bar", "one"), ("bar", "two")]
index = pd.MultiIndex.from_tuples(tuples)
data = [[0], [1]]
input_df = pd.DataFrame(data, index=index)
# 0
# bar one 0
# two 1
output_df = input_df.unstack()
# 0
# one two
# bar 0 1
options = GraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output_df)
rel_graph_edges = rel_graph.edges
bar_in_0 = GraphNode("I0", '[0,-2]', GraphNodeType.INDEX)
bar_in_1 = GraphNode("I0", '[1,-2]', GraphNodeType.INDEX)
bar_out = GraphNode("O0", '[0,-1]', GraphNodeType.INDEX)
one_in = GraphNode("I0", '[0,-1]', GraphNodeType.INDEX)
two_in = GraphNode("I0", '[1,-1]', GraphNodeType.INDEX)
one_out = GraphNode("O0", '[-1,0]', GraphNodeType.COLUMN)
two_out = GraphNode("O0", '[-1,1]', GraphNodeType.COLUMN)
in_0 = GraphNode("I0", '[0,0]', GraphNodeType.INT)
in_1 = GraphNode("I0", '[1,0]', GraphNodeType.INT)
out_0 = GraphNode("O0", '[0,0]', GraphNodeType.INT)
out_1 = GraphNode("O0", '[0,1]', GraphNodeType.INT)
adjacency_edges = [
GraphEdge(bar_in_0, bar_in_1, GraphEdgeType.ADJACENCY),
GraphEdge(bar_in_0, one_in, GraphEdgeType.ADJACENCY),
GraphEdge(bar_in_1, two_in, GraphEdgeType.ADJACENCY),
GraphEdge(one_in, two_in, GraphEdgeType.ADJACENCY)
]
for edge in adjacency_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
indexing_edges = [
GraphEdge(bar_in_0, in_0, GraphEdgeType.INDEX),
GraphEdge(one_in, in_0, GraphEdgeType.INDEX),
GraphEdge(bar_in_1, in_1, GraphEdgeType.INDEX),
GraphEdge(two_in, in_1, GraphEdgeType.INDEX),
GraphEdge(bar_out, out_0, GraphEdgeType.INDEX),
GraphEdge(bar_out, out_1, GraphEdgeType.INDEX)
]
for edge in indexing_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
GraphEdge(bar_in_0, bar_out, GraphEdgeType.EQUALITY),
GraphEdge(bar_in_1, bar_out, GraphEdgeType.EQUALITY),
GraphEdge(one_in, one_out, GraphEdgeType.EQUALITY),
GraphEdge(two_in, two_out, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_column_multi(self):
column_labels = [['bar', 'bar', 'baz', 'baz'], ['one', 'two', 'one', 'two']]
tuples = list(zip(*column_labels))
col_index = pd.MultiIndex.from_tuples(tuples)
data = [[0, 1, 2, 3], [4, 5, 6, 7]]
input_df = pd.DataFrame(data, columns=col_index)
# bar baz
# one two one two
# 0 0 1 2 3
# 1 4 5 6 7
output_df = input_df.stack().reset_index()
# level_0 level_1 bar baz
# 0 0 one 0 2
# 1 0 two 1 3
# 2 1 one 4 6
# 3 1 two 5 7
options = GraphOptions()
options.COLUMN_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output_df)
rel_graph_edges = rel_graph.edges
col_nodes = [[GraphNode("I0", '[-2,0]', GraphNodeType.COLUMN),
GraphNode("I0", '[-2,1]', GraphNodeType.COLUMN),
GraphNode("I0", '[-2,2]', GraphNodeType.COLUMN),
GraphNode("I0", '[-2,3]', GraphNodeType.COLUMN)],
[GraphNode("I0", '[-1,0]', GraphNodeType.COLUMN),
GraphNode("I0", '[-1,1]', GraphNodeType.COLUMN),
GraphNode("I0", '[-1,2]', GraphNodeType.COLUMN),
GraphNode("I0", '[-1,3]', GraphNodeType.COLUMN)],
]
adjacency_edges = [
GraphEdge(col_nodes[0][0], col_nodes[1][0], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][0], col_nodes[0][1], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[1][0], col_nodes[1][1], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[1][1], col_nodes[1][2], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][1], col_nodes[1][1], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][1], col_nodes[0][2], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][2], col_nodes[1][2], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][2], col_nodes[0][3], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[1][2], col_nodes[1][3], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][3], col_nodes[1][3], GraphEdgeType.ADJACENCY)
]
for edge in adjacency_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# indexing edges
input_coli_elems = [
[GraphNode("I0", '[0,0]', GraphNodeType.INT),
GraphNode("I0", '[1,0]', GraphNodeType.INT)],
[GraphNode("I0", '[0,1]', GraphNodeType.INT),
GraphNode("I0", '[1,1]', GraphNodeType.INT)],
[GraphNode("I0", '[0,2]', GraphNodeType.INT),
GraphNode("I0", '[1,2]', GraphNodeType.INT)],
[GraphNode("I0", '[0,3]', GraphNodeType.INT),
GraphNode("I0", '[1,3]', GraphNodeType.INT)]
]
def check_edges(in_nodes, out_nodes, edge_type):
for in_node in in_nodes:
for out_node in out_nodes:
edge = GraphEdge(in_node, out_node, edge_type)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
for i in range(4):
in_nodes = [col_nodes[0][i], col_nodes[1][i]]
out_nodes = input_coli_elems[i]
check_edges(in_nodes, out_nodes, GraphEdgeType.INDEX)
# equality_edges
bars = [col_nodes[0][0], col_nodes[0][1]]
bazs = [col_nodes[0][2], col_nodes[0][3]]
ones = [col_nodes[1][0], col_nodes[1][2]]
twos = [col_nodes[1][1], col_nodes[1][3]]
out_01 = GraphNode("O0", '[0,1]', GraphNodeType.STR)
out_11 = GraphNode("O0", '[1,1]', GraphNodeType.STR)
out_21 = GraphNode("O0", '[2,1]', GraphNodeType.STR)
out_31 = GraphNode("O0", '[3,1]', GraphNodeType.STR)
out_col_2 = GraphNode("O0", '[-1,2]', GraphNodeType.COLUMN)
out_col_3 = GraphNode("O0", '[-1,3]', GraphNodeType.COLUMN)
check_edges(bars, [out_col_2], GraphEdgeType.EQUALITY)
check_edges(bazs, [out_col_3], GraphEdgeType.EQUALITY)
check_edges(ones, [out_01, out_21], GraphEdgeType.EQUALITY)
check_edges(twos, [out_11, out_31], GraphEdgeType.EQUALITY)
def test_no_spurious_for_idx_arg(self):
df = | pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns=["A", "B"]) | pandas.DataFrame |
import pandas as pd
import sqlalchemy
from constants import DB_FOLDER, SYMBOL
import matplotlib.pyplot as plt
def create_engine(symbol):
engine = sqlalchemy.create_engine(f"sqlite:///{DB_FOLDER}/{symbol}-stream.db")
return engine
def fetch_dataframe(symbol, engine):
try:
return | pd.read_sql(symbol, engine) | pandas.read_sql |
import numpy as np
import pandas as pd
from scipy.io import loadmat
from tqdm import tqdm
ORIG_AU_NAMES = [
'AU1', 'AU1-2', 'AU2', 'AU2L', 'AU4', 'AU5', 'AU6', 'AU6L', 'AU6R', 'AU7L', 'AU7R', 'AU9',
'AU10Open', 'AU10LOpen', 'AU10ROpen', 'AU11L', 'AU11R', 'AU12', 'AU25-12', 'AU12L', 'AU12R',
'AU13', 'AU14', 'AU14L', 'AU14R', 'AU15', 'AU16Open', 'AU17', 'AU20', 'AU20L', 'AU20R', 'AU22',
'AU23', 'AU24', 'AU25', 'AU26', 'AU27i', 'AU38', 'AU39', 'AU43', 'AU7', 'AU12-6'
]
rename_au = {'AU10Open': 'AU10', 'AU10LOpen': 'AU10L', 'AU10ROpen': 'AU10R', 'AU16Open': 'AU16', 'AU27i': 'AU27'}
au_names = [rename_au[name] if name in rename_au.keys() else name for name in ORIG_AU_NAMES]
emo_names = ['happy', 'surprise', 'fear', 'disgust', 'anger', 'sadness', 'other']
resp_mapper = {}
i = 1
for emo in emo_names:
for inten in range(1, 6):
resp_mapper[i] = {'emotion': emo, 'intensity': inten}
i += 1
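# resp_mapper now maps response codes 1..35 onto emotion/intensity pairs, e.g.
# resp_mapper[1] == {'emotion': 'happy', 'intensity': 1} and
# resp_mapper[35] == {'emotion': 'other', 'intensity': 5} (7 emotions x 5 intensities).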
mat = loadmat('data/raw/EA_data_Lukas.mat')
resp = mat['responses'].squeeze()
stim_au = mat['stim_au_patterns']
stim_gender = mat['stim_gend'].squeeze()
stim_id = mat['stim_id'].squeeze()
sub_idx = mat['participants'].squeeze()
subs = np.unique(sub_idx)
for i, sub in tqdm(enumerate(subs)):
idx = sub_idx == sub
au_data = stim_au[idx, :]
idx = []
for ii in range(au_data.shape[0]):
au_on = np.where(au_data[ii, :] > 0)[0]
this_idx= '_'.join(
[f'{au_names[iii]}-{int(100 * au_data[ii, iii])}'
for iii in au_on]
)
if not this_idx:
this_idx = 'empty'
idx.append(this_idx)
df = | pd.DataFrame(au_data, columns=au_names, index=idx) | pandas.DataFrame |
# This scripts generates graphs for
# outputs of benchmarks
import argparse
import itertools
import os
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from matplotlib.lines import Line2D
LINE_STYLES = ["-", ":", "-.", "--"]
cmap = plt.cm.get_cmap('Dark2')
COLORS = [cmap(i) for i in range(5)]
MARKERS = ["o", "v", "^", "<", ">", "1", "2", "3", "4", "8", "s", "x", "D"]
def style_gen():
for line, color, marker in zip(
itertools.cycle(LINE_STYLES),
itertools.cycle(COLORS),
itertools.cycle(MARKERS)):
yield {
"line": line,
"color": color,
"marker": marker
}
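# Example of how style_gen() is meant to be used: each next() call yields a dict
# with "line", "color" and "marker" keys cycling through the pools above, so
# successive plotted series get distinct styles.
def take_styles(n):
    gen = style_gen()
    return [next(gen) for _ in range(n)]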
class Data:
def __init__(self, filename):
self.raw_data = pd.read_csv(filename)
self.raw_data.drop("graph_set", axis=1, inplace=True)
exclude = ["tlevel-simple", "blevel-simple"]
self.raw_data = pd.DataFrame(
self.raw_data[~self.raw_data["scheduler_name"].isin(exclude)])
mins = self.raw_data.groupby(
["graph_id", "cluster_name", "bandwidth", "netmodel"]
)["time"].transform(pd.Series.min)
self.raw_data["score"] = self.raw_data["time"] / mins
def prepare(self,
cluster_name=None,
exclude_single=False,
netmodel="maxmin",
min_sched_interval=0.1,
imode="exact"):
rd = self.raw_data
if netmodel:
f = rd["netmodel"] == netmodel
else:
f = rd["netmodel"].isin(["simple", "maxmin"])
if min_sched_interval is not None:
f &= rd["min_sched_interval"] == min_sched_interval
else:
f &= rd["min_sched_interval"].isin([0.0, 0.1, 0.4, 1.6, 6.4])
if imode is not None:
f &= rd["imode"] == imode
else:
f &= rd["imode"].isin(["exact", "mean", "user"])
if cluster_name:
f &= rd["cluster_name"] == cluster_name
        if exclude_single:
f &= rd["scheduler_name"] != "single"
return | pd.DataFrame(rd[f]) | pandas.DataFrame |
import json, os, logging
from typing import Tuple, Optional
import pandas as pd
from datetime import datetime
from jinja2 import Environment, FileSystemLoader, select_autoescape
from iplotter import ChartJSPlotter
from iplotter import GCPlotter
def read_bcl2fastq_stats_data_from_pandas(data: dict) -> Tuple[list, list, list]:
'''
A function for parsing Stats.json files from Illumina BCL2Fastq output
:param data: A dictionary containing the following keys
* ConversionResults
* UnknownBarcodes
    :returns: Three lists of row dicts: lane summaries, per-sample stats, and unknown barcode counts
'''
try:
row_l = list()
row_s = list()
unknown_df = list()
for i in data.get('ConversionResults'):
lane_number = i.get('LaneNumber')
total_cluster_raw = i.get('TotalClustersRaw')
total_cluster_pf = i.get('TotalClustersPF')
total_yield = i.get('Yield')
row_l.append({
'Lane': lane_number,
'Total_cluster_raw': total_cluster_raw,
'Total_cluster_pf': total_cluster_pf,
'Total_yield': total_yield})
demux_results = i.get('DemuxResults')
for j in demux_results:
sample_id = j.get('SampleId')
sample_name = j.get('SampleName')
index = j.get('IndexMetrics')[0].get('IndexSequence')
num_reads = j.get('NumberReads')
yield_val = j.get('Yield')
perfect_barcodes = j['IndexMetrics'][0]['MismatchCounts']['0']
yield_q30 = 0
qual_score_sum = 0
read_metrics = j.get('ReadMetrics')
for read in read_metrics:
q30_bases = int(read.get('YieldQ30'))
yield_q30 += q30_bases
qual_score = int(read.get('QualityScoreSum'))
qual_score_sum += qual_score
row_s.append({
'Lane': lane_number,
'Sample_ID': sample_id,
'Sample_Name': sample_name,
'Index_seq': index,
'Num_reads': num_reads,
'Perfect_barcode': perfect_barcodes,
'Yield_q30': yield_q30,
'Yield': int(yield_val),
'Qual_score_sum': qual_score_sum})
for unknown_entry in data.get('UnknownBarcodes'):
lane_id = unknown_entry.get('Lane')
barcodes = unknown_entry.get('Barcodes')
for barcode, read in barcodes.items():
unknown_df.\
append({
'Lane': lane_id,
'Barcode': barcode,
'Reads': read })
return row_l, row_s, unknown_df
except Exception as e:
raise ValueError(e)
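# Sketch of a typical downstream step (column names follow the row dicts built
# above): turn the per-sample rows into a DataFrame and add the percentage of
# perfectly matched barcodes per sample.
def summarise_sample_rows(row_s):
    df = pd.DataFrame(row_s)
    df['Perfect_barcode_pct'] = 100.0 * df['Perfect_barcode'] / df['Num_reads']
    return df.sort_values(['Lane', 'Sample_ID'])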
def read_data_via_pandas(data_path: list) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
'''
    A function for reading a list of Stats.json files from Illumina BCL2Fastq output
:param data_path: A list of Stats.json file paths
:returns: Three Pandas DataFrames
'''
try:
summary_records = pd.DataFrame()
sample_records = pd.DataFrame()
undetermined_records = pd.DataFrame()
for f in data_path:
with open(f, 'r') as fp:
json_data = json.load(fp)
row_l, row_s, unknown_df = \
read_bcl2fastq_stats_data_from_pandas(json_data)
summary_records = \
pd.concat([summary_records, pd.DataFrame(row_l)], ignore_index=True)
sample_records = \
pd.concat([sample_records, | pd.DataFrame(row_s) | pandas.DataFrame |
import time
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from operator import itemgetter
import math
import csv
attributeType = ["qualitative", "numerical", "qualitative", "qualitative", "numerical", "qualitative", "qualitative",
"numerical", "qualitative", "qualitative", "numerical", "qualitative", "numerical", "qualitative",
"qualitative", "numerical", "qualitative", "numerical", "qualitative", "qualitative"]
def performLabelEncoding(dataframe):
le = preprocessing.LabelEncoder()
i = 0
# For every column
for column in dataframe:
# Excluding the last two
if i == 20:
break
# If attribute is qualitative
if attributeType[i] == "qualitative":
# Label encode it
dataframe[column] = le.fit_transform(dataframe[column])
i += 1
return dataframe
def createPlots(dataframe):
good = dataframe[dataframe["Label"] == 1]
bad = dataframe[dataframe["Label"] == 2]
i = 0
# For every column
for column in dataframe:
# Excluding the last two
if i == 20:
break
# If attribute is qualitative
if attributeType[i] == "qualitative":
plt.title(column + " Good")
good[column].value_counts().plot(kind='bar')
name = "output/Attribute" + str(i + 1) + "_" + "good.png"
savefig(name)
plt.figure()
plt.title(column + " Bad")
bad[column].value_counts().plot(kind='bar')
name = "output/Attribute" + str(i + 1) + "_" + "bad.png"
savefig(name)
if i < 19:
plt.figure()
# If attribute is numerical
elif attributeType[i] == "numerical":
plt.title(column + " Good")
good.boxplot(column)
name = "output/Attribute" + str(i + 1) + "_" + "good.png"
savefig(name)
plt.figure()
plt.title(column + " Bad")
bad.boxplot(column)
name = "output/Attribute" + str(i + 1) + "_" + "bad.png"
savefig(name)
if i < 19:
plt.figure()
i += 1
def classifiers(dataframe):
kf = KFold(n_splits=10)
attributeColumns = dataframe.iloc[:, 0:20]
svm_accuracy = 0
# Run SVM
print("Running SVM...(this might take some time)")
for train_index, test_index in kf.split(dataframe):
X_train_counts = np.array(attributeColumns)[train_index]
X_test_counts = np.array(attributeColumns)[test_index]
clf_cv = svm.SVC(gamma=1.0, C=1.0, kernel="rbf").fit(X_train_counts,
np.array(dataframe["Label"])[train_index])
yPred = clf_cv.predict(X_test_counts)
svm_accuracy += accuracy_score(np.array(dataframe["Label"])[test_index], yPred)
svm_accuracy /= 10
print("SVM Accuracy: ", svm_accuracy)
rf_accuracy = 0
# Run Random Forests
print("Running Random Forest...")
for train_index, test_index in kf.split(dataframe):
X_train_counts = np.array(attributeColumns)[train_index]
X_test_counts = np.array(attributeColumns)[test_index]
clf_cv = RandomForestClassifier().fit(X_train_counts, np.array(dataframe["Label"])[train_index])
yPred = clf_cv.predict(X_test_counts)
rf_accuracy += accuracy_score(np.array(dataframe["Label"])[test_index], yPred)
rf_accuracy /= 10
print("Random Forest Accuracy: ", rf_accuracy)
nb_accuracy = 0
# Run Naive Bayes
print("Running Naive Bayes...")
for train_index, test_index in kf.split(dataframe):
X_train_counts = np.array(attributeColumns)[train_index]
X_test_counts = np.array(attributeColumns)[test_index]
clf_cv = MultinomialNB().fit(X_train_counts, np.array(dataframe["Label"])[train_index])
yPred = clf_cv.predict(X_test_counts)
nb_accuracy += accuracy_score(np.array(dataframe["Label"])[test_index], yPred)
nb_accuracy /= 10
print("Naive Bayes Accuracy: ", nb_accuracy)
# Output to a .csv file
out_file = open("output/EvaluationMetric_10fold.csv", 'w')
wr = csv.writer(out_file, delimiter="\t")
firstLine = ["Statistic Measure", "Naive Bayes", "Random Forest", "SVM"]
wr.writerow(firstLine)
secondLine = ["Accuracy", nb_accuracy, rf_accuracy, svm_accuracy]
wr.writerow(secondLine)
def predictions(dataframe, test_dataframe):
test_dataframe = performLabelEncoding(test_dataframe)
# Convert to numpy array only the attributes (exclude label & id)
X_train = np.array(dataframe.iloc[:, 0:20])
X_test = np.array(test_dataframe.iloc[:, 0:20])
clf_cv = RandomForestClassifier().fit(X_train, np.array(dataframe["Label"]))
predicted = clf_cv.predict(X_test)
# Output to a .csv file
out_file = open("output/testSet_Predictions.csv", 'w')
wr = csv.writer(out_file, delimiter="\t")
firstLine = ["Client_ID", "Predicted_Label"]
# Write the first line
wr.writerow(firstLine)
# For every prediction
for i in range(len(test_dataframe)):
# If its good
if predicted[i] == 1:
line = [int(test_dataframe["Id"][i]), "Good"]
# If its bad
else:
line = [int(test_dataframe["Id"][i]), "Bad"]
# Write the line
wr.writerow(line)
def entropy(dataframe, attribute):
attributeFrequency = {}
entropy = 0.0
# For every row of the dataframe, count the frequencies per value
for i in range(len(dataframe)):
value = dataframe[attribute][i]
if value in attributeFrequency:
attributeFrequency[value] += 1.0
else:
attributeFrequency[value] = 1.0
# For each value apply the entropy formula
for frequency in attributeFrequency.values():
entropy += (-frequency / len(dataframe)) * math.log(frequency / len(dataframe), 2)
return entropy
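# entropy() above computes the Shannon entropy H = -sum_v p(v) * log2(p(v)) of the
# value distribution of `attribute`, with p(v) = count of v / number of rows;
# informationGain() below reuses the same frequency table and accumulates a
# frequency-weighted subset entropy (the `weight` term).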
def informationGain(dataframe, attribute):
attributeFrequency = {}
subsetEntropy = 0.0
# For every row of the dataframe, count the frequencies per value
for i in range(len(dataframe)):
value = dataframe[attribute][i]
if value in attributeFrequency:
attributeFrequency[value] += 1.0
else:
attributeFrequency[value] = 1.0
# For each value apply the information gain formula
for keyValue in attributeFrequency.keys():
weight = attributeFrequency[keyValue] / sum(attributeFrequency.values())
dataframeSubset = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import StandardScaler
from scipy import signal
from scipy.io import loadmat
from sklearn.metrics import confusion_matrix
import os
from tensorflow.keras.models import Sequential, Model, load_model
import datetime
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow import keras as K
from tqdm import tqdm
from sklearn.decomposition import PCA
import scipy as sp
def rms(data):
return np.sqrt(np.mean(data ** 2))
def hist(data, nbins=20):
histsig, bin_edges = np.histogram(data, bins=nbins)
return tuple(histsig)
def entropy(data):
pk = sp.stats.rv_histogram(np.histogram(data, bins=20)).pdf(data)
return sp.stats.entropy(pk)
def kurtosis(data):
return sp.stats.kurtosis(data)
def zero_cross(data):
return len(np.where(np.diff(np.sign(data)))[0]) / len(data)
def min(data):
return np.min(data)
def max(data):
return np.max(data)
def mean(data):
return np.mean(data)
def median(data):
return np.median(data)
def fft(data):
return np.fft.fft(data)
def psd(data):
return np.abs(np.fft.fft(data)) ** 2
def get_data(path, file):
mat = loadmat(os.path.join(path, file))
data = pd.DataFrame(mat['emg'])
data['stimulus'] = mat['restimulus']
data['repetition'] = mat['repetition']
return data
def normalise(data, train_reps):
x = [np.where(data.values[:, 13] == rep) for rep in train_reps]
indices = np.squeeze(np.concatenate(x, axis=-1))
train_data = data.iloc[indices, :]
    train_data = train_data.reset_index(drop=True)
scaler = StandardScaler(with_mean=True,
with_std=True,
copy=False).fit(train_data.iloc[:, :12])
scaled = scaler.transform(data.iloc[:, :12])
normalised = pd.DataFrame(scaled)
normalised['stimulus'] = data['stimulus']
normalised['repetition'] = data['repetition']
return normalised
def filter_data(data, f, butterworth_order=4, btype='lowpass'):
emg_data = data.values[:, :12]
f_sampling = 2000
nyquist = f_sampling / 2
if isinstance(f, int):
fc = f / nyquist
else:
fc = list(f)
for i in range(len(f)):
fc[i] = fc[i] / nyquist
b, a = signal.butter(butterworth_order, fc, btype=btype)
transpose = emg_data.T.copy()
for i in range(len(transpose)):
transpose[i] = (signal.lfilter(b, a, transpose[i]))
filtered = pd.DataFrame(transpose.T)
filtered['stimulus'] = data['stimulus']
filtered['repetition'] = data['repetition']
return filtered
def rectify(data):
return abs(data)
def windowing(data, reps, gestures, win_len, win_stride):
if reps:
x = [np.where(data.values[:, 13] == rep) for rep in reps]
indices = np.squeeze(np.concatenate(x, axis=-1))
data = data.iloc[indices, :]
data = data.reset_index(drop=True)
if gestures:
x = [np.where(data.values[:, 12] == move) for move in gestures]
indices = np.squeeze(np.concatenate(x, axis=-1))
data = data.iloc[indices, :]
data = data.reset_index(drop=True)
idx = [i for i in range(win_len, len(data), win_stride)]
X = np.zeros([len(idx), win_len, len(data.columns) - 2])
y = np.zeros([len(idx), ])
reps = np.zeros([len(idx), ])
for i, end in enumerate(idx):
start = end - win_len
X[i] = data.iloc[start:end, 0:12].values
y[i] = data.iloc[end, 12]
reps[i] = data.iloc[end, 13]
return X, y, reps
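# Shape example for windowing(): with win_len=400 and win_stride=20 at the 2 kHz
# sampling rate assumed in filter_data(), the windows are 200 ms long with a 10 ms
# hop; X has shape (n_windows, 400, 12), while y and reps have shape (n_windows,).
# These window sizes are illustrative, not values used elsewhere in this file.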
def train_model(model, X_train_wind, y_train_wind, X_test_wind, y_test_wind, save_to, epoch=300):
from tensorflow import keras as K
opt_adam = K.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=opt_adam, metrics=['categorical_accuracy'])
# log_dir="logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=30)
mc = ModelCheckpoint(save_to + '_best_model.h5', monitor='val_categorical_accuracy', mode='max', verbose=1,
save_best_only=True)
history = model.fit(x=X_train_wind, y=y_train_wind, epochs=epoch, shuffle=True,
verbose=1,
validation_data=(X_test_wind, y_test_wind), callbacks=[es, mc])
saved_model = load_model(save_to + '_best_model.h5')
# evaluate the model
_, train_acc = saved_model.evaluate(X_train_wind, y_train_wind, verbose=0)
_, test_acc = saved_model.evaluate(X_test_wind, y_test_wind, verbose=0)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
return history, saved_model
def get_categorical(y):
return pd.get_dummies(pd.Series(y)).values
def plot_cnf_matrix(saved_model, X_valid_cv, target):
y_pred = saved_model.predict(X_valid_cv)
model_predictions = [list(y_pred[i]).index(y_pred[i].max()) + 1 for i in range(len(y_pred))]
conf_mx = confusion_matrix(target, model_predictions)
plt.matshow(conf_mx)
plt.show()
def feature_extractor(features, shape, data):
l = | pd.DataFrame() | pandas.DataFrame |
"""Network rerouting loss maps
"""
import os
import sys
from collections import OrderedDict
import numpy as np
import geopandas as gpd
import pandas as pd
import cartopy.crs as ccrs
import matplotlib as mpl
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from shapely.geometry import LineString
from atra.utils import *
mpl.style.use('ggplot')
mpl.rcParams['font.size'] = 10.
mpl.rcParams['font.family'] = 'tahoma'
mpl.rcParams['axes.labelsize'] = 10.
mpl.rcParams['xtick.labelsize'] = 9.
mpl.rcParams['ytick.labelsize'] = 9.
def plot_ranges(input_data, division_factor,x_label, y_label,plot_title,plot_color,plot_file_path,ylimit=None,yticks_loc=None,y_ticks_labels=None):
fig, ax = plt.subplots(figsize=(8, 4))
# vals_min_max = list(zip(*list(h for h in input_data.itertuples(index=False))))
vals_min_max = []
for a, b in input_data.itertuples(index=False):
if a < b:
min_, max_ = a, b
else:
min_, max_ = b, a
vals_min_max.append((min_, max_))
vals_min_max.sort(key=lambda el: el[1])
vals_min_max = list(zip(*vals_min_max))
percentlies = 100.0*np.arange(0,len(vals_min_max[0]))/len(vals_min_max[0])
ax.plot(percentlies,
1.0*np.array(vals_min_max[0])/division_factor,
linewidth=0.5,
color=plot_color
)
ax.plot(percentlies,
1.0*np.array(vals_min_max[1])/division_factor,
linewidth=0.5,
color=plot_color
)
ax.fill_between(percentlies,
1.0*np.array(vals_min_max[0])/division_factor,
1.0*np.array(vals_min_max[1])/division_factor,
alpha=0.5,
edgecolor=None,
facecolor=plot_color
)
if 'BCR' in y_label:
ax.plot(np.arange(0,100),
np.array([1]*100),
linewidth=0.5,
color='red',
label = 'BCR = 1'
)
# ax.set_xscale('log')
ax.legend(loc='upper left')
if ylimit:
max_val = input_data.max().max()
y_ticks_labels += [str(int(max_val/division_factor))]
ax.set_ylim(bottom=-0.5,top=ylimit/division_factor)
plt.yticks(yticks_loc,y_ticks_labels)
# ax.set_yscale('log')
# ax.tick_params(axis='x', rotation=45)
plt.xlabel(x_label, fontweight='bold')
plt.ylabel(y_label, fontweight='bold')
plt.title(plot_title)
plt.tight_layout()
plt.savefig(plot_file_path, dpi=500)
plt.close()
def plot_many_ranges(input_dfs, division_factor,x_label, y_label,plot_title,plot_color,plot_labels,plot_file_path):
fig, ax = plt.subplots(figsize=(8, 4))
length = []
for i in range(len(input_dfs)):
input_data = input_dfs[i]
vals_min_max = []
for a, b in input_data.itertuples(index=False):
if a < b:
min_, max_ = a, b
else:
min_, max_ = b, a
vals_min_max.append((min_, max_))
# vals_min_max.sort(key=lambda el: el[1])
vals_min_max = list(zip(*vals_min_max))
percentlies = 100.0*np.arange(0,len(vals_min_max[0]))/len(vals_min_max[0])
length.append(len(vals_min_max[0]))
ax.plot(percentlies,
1.0*np.array(vals_min_max[0])/division_factor,
linewidth=0.5,
color=plot_color[i]
)
ax.plot(percentlies,
1.0*np.array(vals_min_max[1])/division_factor,
linewidth=0.5,
color=plot_color[i]
)
ax.fill_between(percentlies,
1.0*np.array(vals_min_max[0])/division_factor,
1.0*np.array(vals_min_max[1])/division_factor,
alpha=0.5,
edgecolor=None,
facecolor=plot_color[i],
label = plot_labels[i]
)
length = max(length)
if 'BCR' in y_label:
ax.plot(np.arange(0,100),
np.array([1]*100),
linewidth=0.5,
color='red',
label = 'BCR = 1'
)
# ax.set_xscale('log')
ax.set_yscale('log')
# ax.tick_params(axis='x', rotation=45)
ax.legend(loc='upper left')
plt.xlabel(x_label, fontweight='bold')
plt.ylabel(y_label, fontweight='bold')
plt.title(plot_title)
plt.tight_layout()
plt.savefig(plot_file_path, dpi=500)
plt.close()
def change_to_infinity(x,dividend_column,divisor_column):
if x[divisor_column] == 0 and x[dividend_column] == 0:
return 0
elif x[divisor_column] == 0 and x[dividend_column] > 0:
return 1e9
elif x[divisor_column] == 0 and x[dividend_column] < 0:
return -1e9
else:
return 100.0*(x[dividend_column] - x[divisor_column])/x[divisor_column]
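# Worked example for change_to_infinity(): with dividend 30 and divisor 20 it
# returns 100*(30-20)/20 = 50.0 (a percentage change); a zero divisor with a
# non-zero dividend returns the +/-1e9 sentinels used as "infinite change" markers.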
def main():
config = load_config()
data_path = config['paths']['data']
duration = 10
change_colors = ['#1a9850','#66bd63','#a6d96a','#d9ef8b','#fee08b','#fdae61','#f46d43','#d73027','#969696']
change_labels = ['< -100','-100 to -50','-50 to -10','-10 to 0','0 to 10','10 to 50','50 to 100',' > 100','No change/value']
change_ranges = [(-1e10,-100),(-100,-50),(-50,-10),(-10,0),(0.001,10),(10,50),(50,100),(100,1e10)]
region_file_path = os.path.join(config['paths']['data'], 'network',
'rail_edges.shp')
region_file = gpd.read_file(region_file_path,encoding='utf-8')
fail_file = pd.read_csv(os.path.join(config['paths']['output'], 'failure_results','minmax_combined_scenarios',
'single_edge_failures_minmax_rail_100_percent_disrupt_multi_modal.csv'))
fail_file = fail_file[fail_file['max_tr_loss'] < 1e7]
region_file = pd.merge(region_file[['edge_id',
'geometry']],fail_file[['edge_id',
'min_tr_loss',
'max_tr_loss']],how='left',on=['edge_id']).fillna(0)
flow_file = pd.read_csv(os.path.join(config['paths']['output'], 'failure_results','minmax_combined_scenarios',
'single_edge_failures_minmax_rail_100_percent_disrupt.csv'))
region_file = pd.merge(region_file,flow_file[['edge_id',
'min_econ_impact',
'max_econ_impact']],how='left',on=['edge_id']).fillna(0)
fail_file = pd.merge(fail_file,flow_file[['edge_id',
'min_econ_impact',
'max_econ_impact']],how='outer', on=['edge_id']).fillna(0)
del flow_file
flow_file = pd.read_csv(os.path.join(config['paths']['output'], 'risk_results',
'rail_hazard_intersections_risk_weights.csv'))
fail_file = pd.merge(fail_file,flow_file,how='left', on=['edge_id']).fillna(0)
del flow_file
fail_file['min_eael'] = duration*fail_file['risk_wt']*fail_file['min_econ_impact']
fail_file['max_eael'] = duration*fail_file['risk_wt']*fail_file['max_econ_impact']
fail_file['min_eael_multimodal'] = duration*fail_file['risk_wt']*fail_file['min_tr_loss']
fail_file['max_eael_multimodal'] = duration*fail_file['risk_wt']*fail_file['max_tr_loss']
# fail_file.to_csv('test.csv')
fail_file = fail_file.groupby(['edge_id',
'climate_scenario'])[
'min_eael_multimodal','min_eael',
'max_eael_multimodal','max_eael'].sum().reset_index()
# fail_file.to_csv('test.csv')
fail_file_min = fail_file.groupby(['edge_id'])['min_eael_multimodal','min_eael'].min().reset_index()
fail_file_max = fail_file.groupby(['edge_id'])['max_eael_multimodal','max_eael'].max().reset_index()
del fail_file
region_file = pd.merge(region_file,fail_file_min,how='left',on=['edge_id']).fillna(0)
region_file = pd.merge(region_file,fail_file_max,how='left',on=['edge_id']).fillna(0)
del fail_file_min,fail_file_max
flow_file = pd.read_csv(os.path.join(config['paths']['output'], 'flow_mapping_combined',
'weighted_flows_rail_100_percent.csv'))
region_file = | pd.merge(region_file,flow_file,how='left', on=['edge_id']) | pandas.merge |
#!/home/bryanfeeney/anaconda3/bin/python3.6
#
# Simple script that uses the Microsoft Light Gradient-Boosted Machine-Learnign
# toolkit to make predictions *separately* for each value.
#
from datetime import date, timedelta, datetime
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import sys
import json
import psycopg2
FutureDaysToCalculate = 16
WeeksOfHistoryForMinTrainigData = 20
WeeksOfHistoryForFeature = 7
WeeksOfHistoryForFeatureOnValidate = 3
TrainingTimePeriodCount = 6
def load_data_csv (cumul_sales_path, cumul_sales_query_path, items_path, stores_path, query_start_date=None):
"""
Loads four datasets from the file-system in CSV format:
    cumul_sales_path is the cumulative sales data, should be the last 12 months
    cumul_sales_query_path enumerates the things to predict
items is item data
stores is store data
    query_start_date: if None, it is inferred from the first row of the query file;
    otherwise, rows of cumul_sales_query before this date are removed.
"""
cumul_sales = pd.read_csv(
cumul_sales_path,
usecols=[1, 2, 3, 4, 5],
dtype={'onpromotion': bool},
converters={'unit_sales': lambda u: np.log1p(float(u)) if float(u) > 0 else 0},
parse_dates=["date"]
)
if cumul_sales_query_path is not None:
cumul_sales_query = pd.read_csv(
cumul_sales_query_path,
usecols=[0, 1, 2, 3, 4],
dtype={'onpromotion': bool},
parse_dates=["date"],
)
if query_start_date is None:
query_start_date = str(cumul_sales_query.iloc[0,1]).split(" ")[0]
else:
cumul_sales_query = cumul_sales_query[cumul_sales_query.date >= query_start_date]
cumul_sales_query = cumul_sales_query.set_index(
['store_nbr', 'item_nbr', 'date']
)
items = pd.read_csv(
        items_path,
).set_index("item_nbr")
stores = pd.read_csv(
        stores_path
).set_index("store_nbr")
return cumul_sales, cumul_sales_query, query_start_date, items, stores
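# Usage sketch for load_data_csv(); the file names are illustrative, and the files
# are assumed to follow the column layout (store_nbr, item_nbr, unit_sales,
# onpromotion) that the readers above expect.
def example_load(data_dir="data"):
    return load_data_csv(
        data_dir + "/sales.csv",
        data_dir + "/sales_query.csv",
        data_dir + "/items.csv",
        data_dir + "/stores.csv")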
def load_data_sql (cumul_sales_path, cumul_sales_query_path, items_path, stores_path, query_start_date=None):
"""
    Loads the same four datasets as load_data_csv, but from a PostgreSQL database (configured via db.json):
    cumul_sales_path is the cumulative sales data, should be the last 12 months
    cumul_sales_query_path enumerates the things to predict
items is item data
stores is store data
"""
with open('db.json') as f:
conf = json.load(f)
print (str(conf))
conn_str = "host={} dbname={} user={} password={}".format(conf['host'], conf['database'], conf['user'], conf['passw'])
conn = psycopg2.connect(conn_str)
cumul_sales_query = | pd.DataFrame() | pandas.DataFrame |
""" generates lists of SARS-CoV-2 samples which occurred before a particular date
Also generates a dictionary of reference compressed sequences
And a subset of these
Together, these can be passed to a ram_persistence object which
can be used instead of an fn3persistence object to test the performance of PCA, or for other
unit testing purposes.
Also useful for investigating how PCA detected the ingress of new strains over time.
Uses public cog metadata downloaded from COG-UK 7/4/2021, saved in
testdata/pca/cog_metadata.csv.gz, and requires access to an fn3persistence object containing the same data.
To run:
pipenv run python3 utils/make_temporal_subsets.py
"""
import os
import pandas as pd
import datetime
import gzip
import pickle
import progressbar
import random
from findn.mongoStore import fn3persistence
from findn.common_utils import ConfigManager
# open connection to existing covid datastore
config_file = os.path.join("demos", "covid", "covid_config_v3.json")
cfm = ConfigManager(config_file)
CONFIG = cfm.read_config()
PERSIST = fn3persistence(dbname=CONFIG["SERVERNAME"], connString=CONFIG["FNPERSISTENCE_CONNSTRING"], debug=CONFIG["DEBUGMODE"])
inputfile = "/data/software/fn4dev/testdata/pca/cog_metadata.csv.gz"
outputdir = "/data/data/pca/subsets" # or wherever
# get samples which are in server
extant_sample_ids = PERSIST.guids()
print("There are {0} samples in the server".format(len(extant_sample_ids)))
# read metadata file into pandas
with gzip.open(inputfile, "rt") as f:
df = | pd.read_csv(f) | pandas.read_csv |
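# Sketch of the date-based subsetting step described in the module docstring. The
# metadata column names ('sequence_name', 'sample_date') are assumptions about the
# public COG-UK metadata layout, and the cutoff date is illustrative.
#
#   df['sample_date'] = pd.to_datetime(df['sample_date'])
#   cutoff = datetime.date(2020, 6, 1)
#   before_cutoff = df[df['sample_date'].dt.date <= cutoff]
#   subset_ids = set(before_cutoff['sequence_name']) & set(extant_sample_ids)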